diff --git a/.gitattributes b/.gitattributes index a258221540f084eaeac4fee5465823fba3768612..33c52cce94d96b16524f3e04839104adb02e1b29 100644 --- a/.gitattributes +++ b/.gitattributes @@ -989,3 +989,11 @@ data/2025/2504_16xxx/2504.16053/3a1e7b3d-76bf-4988-bb05-867bace379b5_origin.pdf data/2025/2504_16xxx/2504.16054/133197e3-ccc3-4137-8c02-e587a7cdec28_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_16xxx/2504.16072/c2b122b3-b64f-447a-8b61-fae8c862f964_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_16xxx/2504.16080/962c0f96-5cf9-4754-ab9a-8c07935bf36c_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_15xxx/2504.15485/a2c7802b-0ba4-4f59-a685-cb9d447cab8d_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_15xxx/2504.15585/c8803fe7-d918-414b-a5e2-cfaba643acbf_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_15xxx/2504.15659/c7f88426-e408-4745-97f1-882178397313_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_15xxx/2504.15714/5ed595a1-6284-4695-8ba1-1bc55e6429ae_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_15xxx/2504.15777/7d52c4ec-83bf-4780-930e-43bf666b3c1c_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_15xxx/2504.15848/c2a6d104-48f7-465b-bad3-e87ed3722daf_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_15xxx/2504.15909/41ce0363-307a-4461-bbaf-6fdf5036b2e7_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_20xxx/2504.20064/3484c7dd-5472-4f2c-ab8a-d4410cc59cb3_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/data/2025/2504_15xxx/2504.15485/a2c7802b-0ba4-4f59-a685-cb9d447cab8d_content_list.json b/data/2025/2504_15xxx/2504.15485/a2c7802b-0ba4-4f59-a685-cb9d447cab8d_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..9997ba0f7db276075d6a4fe7ba5a530975b1ec01 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/a2c7802b-0ba4-4f59-a685-cb9d447cab8d_content_list.json @@ -0,0 +1,2370 @@ +[ + { 
+ "type": "text", + "text": "CAPTURE: Evaluating Spatial Reasoning in Vision Language Models via Occluded Object Counting", + "text_level": 1, + "bbox": [ + 143, + 128, + 854, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Atin Pothiraj", + "bbox": [ + 202, + 203, + 308, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Elias Stengel-Eskin", + "bbox": [ + 349, + 203, + 506, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jaemin Cho", + "bbox": [ + 547, + 204, + 642, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mohit Bansal", + "bbox": [ + 683, + 204, + 792, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "UNC Chapel Hill", + "bbox": [ + 428, + 222, + 568, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{atin, esteng, jmincho, mbansal}@cs.unc.edu", + "bbox": [ + 323, + 241, + 702, + 256 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 248, + 291, + 326, + 306 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recognizing and reasoning about occluded (partially or fully hidden) objects is vital to understanding visual scenes, as occlusions frequently occur in real-world environments and act as obstacles for spatial comprehension. To test models' ability to reason about multiple occluded objects, we introduce a novel task, Counting Amodally for Patterns Through Unseen REgions (CAPTURE), which requires a model to count objects arranged in a pattern by inferring how the pattern continues behind an occluder (an object which blocks parts of the scene). CAPTURE requires both recognizing visual patterns and reasoning, making it a useful testbed for evaluating vision-language models (VLMs) on whether they understand occluded patterns and possess spatial understanding skills. 
By requiring models to reason about occluded objects, CAPTURE also tests VLMs' ability to form world models that would allow them to fill in missing information. CAPTURE consists of two parts: (1) CAPTURE $^{\\text{real}}$ , with manually filtered images of real objects in patterns and (2) CAPTURE $^{\\text{synthetic}}$ , a controlled diagnostic with generated patterned images. We evaluate four strong VLMs (GPT-4o, Intern-VL2, Molmo, and Qwen2-VL) on CAPTURE, finding that models struggle to count on both occluded and unoccluded patterns. Crucially, we find that models perform worse with occlusion, suggesting that VLMs are also deficient in inferring unseen spatial relationships: even the strongest VLMs like GPT-4o fail to count with occlusion. In contrast, we find that humans achieve very little error on CAPTURE. We also find that providing auxiliary information of occluded object locations increases performance, underscoring that the model error comes both from an inability to handle occlusion as well as difficulty in counting in images. $^{1}$", + "bbox": [ + 86, + 323, + 483, + 806 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 91, + 825, + 220, + 840 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Inferring what lies behind different objects in occluded scenes is crucial for human perception, as it allows us to", + "bbox": [ + 89, + 847, + 482, + 878 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/981b3b6aabc89ab61a54e82e52bc90dd504b1f841da1c843d3996fb961f096a9.jpg", + "image_caption": [ + "Instruction: Count the exact number of cups in the image, assuming the pattern continues behind the black box." + ], + "image_footnote": [], + "bbox": [ + 516, + 290, + 692, + 434 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/219a1166134d1772e966ee56c5d2d9ed4a6b2d6e102dc7a24aa55784ec37d983.jpg", + "image_caption": [ + "Figure 1. 
CAPTURE example with an output from GPT-4o. While people can easily infer the missing number of cups and correctly reason over occluded patterns, models generally struggle to reason over these occluded scenes." + ], + "image_footnote": [], + "bbox": [ + 692, + 290, + 880, + 483 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "maintain a coherent understanding of our environment even when parts are hidden. The human visual system accomplishes this by integrating past experiences, context, and sensory inputs to reconstruct incomplete scenes [19, 27, 30, 45]. Meanwhile, recent advancements in vision-language models (VLMs) – especially in terms of visual and spatial reasoning – raise the question of whether these systems can perform similar inferential tasks. One way of measuring such capabilities is through amodal completion – the task of inferring the invisible parts of partially occluded objects; here, vision-only models are typically evaluated via dense prediction tasks like object segmentation and image inpainting [5]. However, this format is not well-suited for assessing VLMs, whose outputs consist of text tokens rather than pixel-level predictions. 
This raises a critical question: How can we quantify the ability of VLMs to form spatial world modeling [17] in the presence of occlusion?", + "bbox": [ + 511, + 566, + 906, + 824 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To address this, we introduce CAPTURE, Counting Amodally for Patterns Through Unseen REgions, a novel benchmark that tests a VLM's world modeling and spatial reasoning abilities through the task of amodal counting, where models are prompted to count occluded objects", + "bbox": [ + 511, + 824, + 908, + 902 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.15485v2 [cs.CV] 13 Aug 2025", + "bbox": [ + 22, + 273, + 60, + 722 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "$^{1}$ Code and data: https://github.com/atinpothiraj/CAPTURE", + "bbox": [ + 107, + 886, + 421, + 898 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "by amodally completing a pattern. CAPTURE focuses on counting as it provides an objective and easy-to-verify output by comparing predicted counts with ground truth values. Moreover, patterned objects appear in various real-world domains, especially in man-made environments like parking lots, cities, and warehouses, where counting objects is often required. Fig. 1 illustrates the CAPTURE task. We show a VLM an image where objects are placed in a regular pattern (e.g., a 4x4 grid) with some objects occluded, and ask the model to count the total number of objects in the image assuming that the pattern continues behind the occlusion. 
The task requires handling occlusion, pattern recognition, and counting skills that exist in humans from a fairly young age [27, 30, 45], thus humans can easily answer such questions – indeed, we find that people can complete CAPTURE tasks with almost no error.", + "bbox": [ + 89, + 90, + 480, + 330 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "CAPTURE consists of two subsets: $\\mathrm{CAPTURE}^{\\mathrm{real}}$ and $\\mathrm{CAPTURE}^{\\mathrm{synthetic}}$ . As shown in Fig. 2, $\\mathrm{CAPTURE}^{\\mathrm{real}}$ contains real-world images and tests the ability of models to perform amodal counting in naturalistic contexts, while $\\mathrm{CAPTURE}^{\\mathrm{synthetic}}$ allows us to analyze specific factors by controlling different variables like color, shape, and number of objects. All images in $\\mathrm{CAPTURE}$ contain a pattern of objects and a manually annotated occluding black box covering some objects. $\\mathrm{CAPTURE}^{\\mathrm{real}}$ contains 924 images with a diverse range of settings and objects, covering 92 different object types, while $\\mathrm{CAPTURE}^{\\mathrm{synthetic}}$ contains 1250 images across multiple attribute classes.", + "bbox": [ + 89, + 335, + 480, + 517 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "By combining vision encoders with large language models (LLMs), VLMs have the potential to reason in a zero-shot way about visual inputs. To put this ability to the test and measure VLMs' ability to reason about missing visual information, we evaluate four strong recent VLMs (GPT40, InternVL2, Molmo, and Qwen2VL) on CAPTURE. Our experiment results (Sec. 4) show that models generally struggle with the multiple aspects of the task, with high error rates on both CAPTURE $^{\\text{real}}$ and CAPTURE $^{\\text{synthetic}}$ for occluded and unoccluded images. 
In contrast, we find that humans can perform the task easily: whereas model performance deteriorates as more objects in images are occluded, humans complete the task almost perfectly. We also compare VLMs to a vision-only model trained to count visible objects; while this model generally outperforms VLMs, its error is directly tied to the number of occluded objects – the more objects are occluded, the higher its error will be.", + "bbox": [ + 89, + 518, + 482, + 777 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "By objectively measuring VLMs' spatial reasoning capabilities under occlusion, CAPTURE highlights an unexpected weakness in VLMs. We analyze this weakness by providing the model with additional clues and information. Specifically, we test to what degree the VLMs' failure stems from an inability to integrate visual information by providing it with a text-based representation of the visible objects in the image in the form of object coordinates; here, VLMs", + "bbox": [ + 89, + 780, + 482, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "perform substantially better, indicating that their poor performance on CAPTURE stems partly from an inability to count objects in images, rather than an inability to count more generally. Our findings align with previous work, which similarly finds that VLMs struggle to count in images [22, 33, 42]. We also test the degree to which VLM errors stem from an inability to form a world model by providing it with auxiliary information (the coordinates of the occluded objects in text, or inpainting the occluded regions). We find that VLMs perform substantially better with this auxiliary information, suggesting that VLMs are partly limited by their inability to imagine the missing visual information. 
Addressing these gaps is critical for VLMs to function effectively in real-world scenarios, where visual reasoning often involves occlusions – whether counting stadium seats, components on production lines, or buildings in neighborhoods. We hope that our work will foster future research on improving the world modeling capabilities of VLMs.", + "bbox": [ + 511, + 90, + 903, + 364 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.CAPTURE", + "text_level": 1, + "bbox": [ + 513, + 377, + 635, + 392 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Task Overview", + "text_level": 1, + "bbox": [ + 511, + 402, + 663, + 417 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Input/output formulation. CAPTURE tests VLMs on occlusion reasoning, pattern recognition, and counting of both visible and occluded objects. VLMs already achieve high accuracy in classifying single, occluded objects [20]. Thus, we also argue that VLMs have the potential to perform well on CAPTURE's challenging task because their proficiency in handling occlusion ought to enable them to recognize occluded objects and reason accordingly. All images in CAPTURE contain a pattern. This makes the task solvable for models and people - if the objects were not placed in a pattern, it would be unreasonable to expect models to infer the position of the occluded objects. For example, given an image of a random pile of coins with a region occluded, it is not easy to infer whether the occluded region contains no coins or contains roughly the same amount as the rest of the pile. For this task, the patterns considered are all regular and fairly small, e.g. grids, circles, triangles, and other regular shapes - see Fig. 2 for further examples. The last step of CAPTURE is counting, asking the model to provide an objectively measurable output. 
In addition to VLMs, we also test COUNTGD [3], a state-of-the-art object detection-based counting method, finding that it fails to account for the occluded scenario, as its training entails solely predicting the visible, unoccluded objects in the image.", + "bbox": [ + 511, + 425, + 903, + 787 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Metric. We use symmetric mean percent error (sMAPE) as the primary metric. sMAPE is given by:", + "bbox": [ + 511, + 796, + 903, + 828 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {s M A P E} = 1 0 0 \\cdot \\frac {1}{n} \\sum_ {i = 1} ^ {n} \\frac {\\left| y _ {i} - \\hat {y} _ {i} \\right|}{\\left| y _ {i} \\right| + \\left| \\hat {y} _ {i} \\right|} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 596, + 839, + 903, + 878 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $y_{i}$ represents the actual values, $\\hat{y}_i$ represents the pre", + "bbox": [ + 511, + 886, + 903, + 901 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "CAPTUREreal", + "text_level": 1, + "bbox": [ + 269, + 89, + 341, + 99 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Instruction: Count the exact number of [object] in the image. Assume the pattern of [object] continues behind any black box. Provide the total number of [object] as if the black box were not there.", + "bbox": [ + 99, + 101, + 500, + 133 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/0320e46c96b593a455fec0826baa3f421db6d2c4a95077806655be22fb113a09.jpg", + "image_caption": [ + "GPT-4o: 18 bottle caps visible...black box covers...5 caps...total estimated count...is 23. 
Ground truth: 19" + ], + "image_footnote": [], + "bbox": [ + 96, + 136, + 223, + 213 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/b7f8ce3847d0a7e3146846c8edff7e0d3871512192406b9fb9b188c9cf8bf053.jpg", + "image_caption": [ + "GPT-4o: Bread rolls are organized in a 4x4 grid, which suggests there are 16 rolls in total. Ground truth: 20", + "GPT-4o: Total nuts in each row...6 times 6 = 36. Nuts in the hidden $3 \\times 3$ section = 9. 36 + 9 = 45 nuts. Ground truth: 42", + "Figure 2. Example images with GPT-4o responses to CAPTURE $^{\\text{real}}$ and CAPTURE $^{\\text{synthetic}}$ occluded splits." + ], + "image_footnote": [], + "bbox": [ + 227, + 136, + 393, + 214 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/fe55be050932617254da6354bbc993dec70850b06690d96aa4514239ea7aaef4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 400, + 136, + 504, + 214 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "CAPTUREsynthetic", + "text_level": 1, + "bbox": [ + 666, + 88, + 754, + 99 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Instruction: Your task is to count objects in the image. Assume the pattern of [object] continues behind the black box. First, state what the pattern is, then give your final count.", + "bbox": [ + 513, + 101, + 883, + 135 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/54a9fb14060fcc4940d0a1d77d308d09d15406b37a1ca1943cd3e02ab33800aa.jpg", + "image_caption": [ + "GPT-4o: Pattern: A circular arrangement... Visible count: ...8 ... Total count: ...12 dots. Ground truth: 11" + ], + "image_footnote": [], + "bbox": [ + 522, + 138, + 633, + 213 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/037e79bd08c36fe6b80824bcb0caf4643a0ad47db6c01c322d5a57de9b716faa.jpg", + "image_caption": [ + "GPT-4o: ...pattern consists of...a 2x2 grid... can infer...an additional four blue squares in the obscured section. 
Final count: 8 blue squares Ground truth: 6" + ], + "image_footnote": [], + "bbox": [ + 643, + 140, + 774, + 212 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/9049c59347241b0b3b291a226e884daeba5d88430388678bdb27fa8f3cd22b6c.jpg", + "image_caption": [ + "GPT-4o: Pattern: Triangular arrangement ...Final Count:7 red dots (5 visible $^+$ 2 estimated behind the black box). Ground truth:6" + ], + "image_footnote": [], + "bbox": [ + 785, + 138, + 893, + 212 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "dicted values, and $n$ is the number of observations. sMAPE is capped at $100\\%$ , providing a fixed range. This makes sMAPE ideal for challenging tasks like ours, as we can penalize responses that fail to produce an answer with a maximum error of $100\\%$ . For a justification of sMAPE over other metrics, see Appendix A.1.", + "bbox": [ + 89, + 344, + 483, + 435 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. Dataset", + "text_level": 1, + "bbox": [ + 89, + 445, + 187, + 459 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "CAPTURE $^{\\text{real}}$ . We introduce a set of real images with patterns to test amodal counting in naturalistic settings. The original images and annotations come from the FSC-147 dataset [37], a diverse counting dataset with manual annotations for the number of target objects and all object bounding boxes in each image. FSC-147 contains a diverse array of objects, with 6146 real-world images across 147 object categories. We filter FSC-147 for images that contain identifiable and regular patterns of objects and manually overlay a black box to occlude some objects, resulting in 924 images. Filtering is first performed with GPT-4o and then manually verified; we also manually verify that determining objects despite the occlusion is feasible. For each example, we maintain both occluded and unoccluded versions. 
Further details on CAPTURE $^{\\text{real}}$ can be found in Appendix B.", + "bbox": [ + 88, + 467, + 482, + 694 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "CAPTUREsynthetic. While CAPTUREreal makes CAPTURE more applicable to real-world scenarios, each image is unique, making the data less controlled and challenging to draw clear conclusions about model performance. Images without background distractors, texture variance, and other potential visual obstacles provide a more controlled version of the task. Therefore, we create CAPTUREsynthetic to examine the task in a fully controlled environment. CAPTUREsynthetic comprises 1250 images of simple objects in patterns, where different variables are held constant or changed. We vary the following features:", + "bbox": [ + 89, + 702, + 482, + 869 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Object count: varies from 5 to 15.", + "2. Object: can be either dots or squares." + ], + "bbox": [ + 89, + 869, + 359, + 900 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/4dfd3d68fad466d166038fb0040b03ca482ef5b70451ed89cecd58cce73374cb.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
CAPTURErealCAPTUREsynthetic
# Images9241250
# Object Types922
Avg. Occluded Obj.13.972.73
Avg. Total Obj.61.4510.00
StrengthsDiverse Objects/SettingsConfounder-free
NaturalisticControllable Attributes
Realistic ContextUniformly Distributed
", + "bbox": [ + 517, + 340, + 903, + 452 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Table 1. Statistics and strengths for CAPTURE splits.", + "bbox": [ + 547, + 462, + 870, + 477 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "3. Arrangement/shape: can be a rectangle, circle, or pyramid (where feasible based on object count).", + "4. Location: we consider five positions on the page: center, top-left, top-right, bottom-left, or bottom-right.", + "5. Color: we randomly choose one of 5 colors for all the objects in an image." + ], + "bbox": [ + 511, + 500, + 903, + 590 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The $\\mathrm{CAPTURE}^{\\mathrm{synthetic}}$ data is split similarly to the $\\mathrm{CAPTURE}^{\\mathrm{real}}$ data; each configuration has a variant with an overlaid occluding box and one without.", + "bbox": [ + 511, + 594, + 905, + 640 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. Statistics and Examples", + "text_level": 1, + "bbox": [ + 511, + 662, + 732, + 679 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Fig. 2 shows examples from $\\mathrm{CAPTURE}^{\\mathrm{real}}$ and $\\mathrm{CAPTURE}^{\\mathrm{synthetic}}$ paired with their corresponding answers from GPT-4o and their ground truth answers. These examples show the range of objects and patterns in the dataset and highlight the task's feasibility for humans. Tab. 1 reports summary statistics for $\\mathrm{CAPTURE}$ , including the number of images and object types, as well as the mean number of occluded and total objects in both splits of $\\mathrm{CAPTURE}$ . The number of objects in $\\mathrm{CAPTURE}^{\\mathrm{real}}$ is shown in Fig. 3, where most images have between 0 and 30 objects. On $\\mathrm{CAPTURE}^{\\mathrm{synthetic}}$ , the maximum number of objects is 15, and $\\mathrm{CAPTURE}^{\\mathrm{synthetic}}$ images generally have 1-6 occluded objects (shown in Fig. 
4, as further occlusion could make the count unresolvable).", + "bbox": [ + 509, + 688, + 906, + 900 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/1bbc7a1297efec4901bf89c6b7bcce563982d0c367188c0e42f7ec57805c29ee.jpg", + "image_caption": [ + "Figure 3. # of objects in CAPTURE $^{\\text{real}}$ images." + ], + "image_footnote": [], + "bbox": [ + 133, + 95, + 434, + 224 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/b5bdfdd2908acbf8cdef268b0abf95ce0fe53e686332d8e190da4bf0e24212c7.jpg", + "image_caption": [ + "Figure 4. # of occluded objects in CAPTUREsynthetic images." + ], + "image_footnote": [], + "bbox": [ + 135, + 262, + 434, + 375 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3. Experiment Setup", + "text_level": 1, + "bbox": [ + 89, + 417, + 266, + 434 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1. Models", + "text_level": 1, + "bbox": [ + 89, + 441, + 183, + 455 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We experiment with GPT-4o [28], Intern-VL2-Llama3-8B [9, 10], Qwen2-VL-7B [41], MiniCPM-o 2.6 [47], and Kimi-VL-A3B [40] for their high scores on other VLM tasks [29]. We add Molmo 7B-D [13], because of its ability to \"point and count,\" giving it a potential advantage on CAPTURE. Specifically, Molmo is trained on millions of examples that directly ground text to 2D coordinates (or \"points\") in images. This allows Molmo to directly point to image coordinates and count more easily by pointing to several objects. All the VLMs feature a different language backbone and vision encoder to provide broad coverage of model architectures. To evaluate models, we provide the model with the name of the specific object to be counted and the explicit instruction to count fully visible objects and objects behind the occluding box (in the occluded images). 
For each model, we test ten prompts on a validation set of 100 images, selecting the best prompt for each model in each dataset section (CAPTURE $^{\\text{real}}$ /CAPTURE $^{\\text{synthetic}}$ ) and for each environment (occluded/unoccluded). We provide the selected prompts in Appendix D.", + "bbox": [ + 89, + 464, + 482, + 767 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2. Answer Generation and Extraction", + "text_level": 1, + "bbox": [ + 89, + 777, + 395, + 792 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given the complex nature of CAPTURE, we allow models to generate open-ended responses and then subsequently extract answers. Further details (including the maximum number of tokens) can be found in Appendix A.2.", + "bbox": [ + 89, + 800, + 480, + 861 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Answer extraction. Empirically, we found that constraining the output to a specific format for ease of analysis neg-", + "bbox": [ + 89, + 869, + 482, + 902 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "atively impacted benchmark performance. Therefore, we instead prompt models to generate freely and extract the final output number using a separate answer extractor based on Llama 3.1 8B [1]. This answer extractor takes the output from the model as input and prompts it to extract a single number representing the final answer. The answer extractor also identifies if an output failed to converge on a singular number answer and assigns a label to these examples. We mark such incomplete/incoherent model generations as 'skipped' questions and when calculating the error later, these responses are assigned the worst possible sMAPE score (100%). The answer extractor outputs were manually verified on 1000 outputs, and the extractor was found to be 100% accurate.", + "bbox": [ + 511, + 90, + 903, + 301 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Human and object detection baselines. 
We also report the performance of humans and a recent counting model (COUNTGD [3]) as baselines to establish a point of reference for model performance. To confirm that humans can perform the CAPTURE task, we provided 100 randomly selected occluded examples each from the CAPTURE $^{\\text{real}}$ and CAPTURE $^{\\text{synthetic}}$ subsets to 3 undergraduate students with no prior knowledge of the task.", + "bbox": [ + 511, + 311, + 903, + 434 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. Results and Analysis", + "text_level": 1, + "bbox": [ + 511, + 446, + 712, + 464 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. Main Results on CAPTUREreal", + "text_level": 1, + "bbox": [ + 511, + 472, + 774, + 487 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Models consistently struggle with counting and perform worse on occluded images. We run the VLMs on the occluded and unoccluded versions of CAPTURE to discern whether occlusion significantly impacts model performance. Tab. 2 shows that all models struggle with counting generally, performing poorly on both splits. Moreover, we see that every model performs better on the unoccluded images. On average, the models perform $6.28\\%$ worse in $\\mathrm{CAPTURE}^{\\mathrm{real}}$ occluded images and $4.85\\%$ worse in $\\mathrm{CAPTURE}^{\\mathrm{synthetic}}$ occluded images (in terms of absolute sMAPE), indicating increased difficulty from a standard counting task. The best model for both splits, GPT-4o, has an error rate of $14.75\\%$ on $\\mathrm{CAPTURE}^{\\mathrm{real}}$ and a lower error rate of $9.71\\%$ on $\\mathrm{CAPTURE}^{\\mathrm{synthetic}}$ . Across both the real and synthetic split, GPT-4o's error increases with occlusion, by $1.41\\%$ on the real data and $3.81\\%$ on the synthetic split. Interestingly, despite its fine-tuning on counting tasks, Molmo exhibits a sizable error rate of $32.5\\%$ on $\\mathrm{CAPTURE}^{\\mathrm{real}}$ occluded images. 
The high error rates of VLMs indicate limited capabilities in visual understanding under occlusions, pattern recognition, and counting. We further analyze the source of these errors with oracle experiments in Sec. 4.3.", + "bbox": [ + 511, + 498, + 903, + 845 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Humans complete the task with almost no error. Tab. 3, evaluated on a 100-example subset of each split, confirms that humans complete the task with ease despite occlusion,", + "bbox": [ + 511, + 854, + 903, + 900 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 925, + 503, + 935 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/4518a5465c290528c213f388056a3af4202975be799d21131dcfbeb4f70ff0f6.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelError (%) [↓]
CAPTURErealCAPTUREsynthetic
Originalw/ Occlusion (Δ)Originalw/ Occlusion (Δ)
GPT-4o13.3414.75 (+1.41)5.909.71 (+3.81)
InternVL226.1732.90 (+6.73)16.4417.57 (+1.13)
Molmo25.9032.49 (+6.59)8.4017.73 (+9.33)
Qwen2VL18.9629.33 (+10.37)6.6311.74 (+5.11)
MiniCPM-o 2.623.8430.08 (+6.24)17.0619.00 (+1.94)
Kimi-VL-A3B23.4825.96 (+2.48)16.9118.07 (+1.16)
Avg. of 6 VLMs21.9527.59 (+5.64)11.8915.64 (+3.75)
", + "bbox": [ + 254, + 88, + 738, + 253 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/b2289750d0a6af17eb500803ad640c7b0e5d851d29c011084ed404545dd9bf6b.jpg", + "table_caption": [ + "Table 2. Results across VLMs on all splits of CAPTURE, with average error for each column. Metric: sMAPE (lower is better)." + ], + "table_footnote": [], + "table_body": "
ModelError (%) [↓]
CAPTURErealCAPTUREsynthetic
(Baseline)
Human3.790.92
(VLMs)
GPT-4o14.759.71
InternVL232.9017.57
Molmo32.4917.73
Qwen2VL29.3311.74
Avg. of 4 VLMs27.3714.19
", + "bbox": [ + 109, + 301, + 460, + 470 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 3. Human baseline vs VLMs on CAPTURE $^{\\text{real}}$ and CAPTURE $^{\\text{synthetic}}$ (occluded split). Metric: sMAPE (lower is better).", + "bbox": [ + 89, + 479, + 480, + 508 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "with an sMAPE of $3.79\\%$ on $\\mathrm{CAPTURE}^{\\mathrm{real}}$ and $0.92\\%$ on $\\mathrm{CAPTURE}^{\\mathrm{synthetic}}$ . On the same subset of examples, models performed 7 times worse on $\\mathrm{CAPTURE}^{\\mathrm{real}}$ and 14 times worse on $\\mathrm{CAPTURE}^{\\mathrm{synthetic}}$ than humans, underscoring the gap between VLMs and humans in this task.", + "bbox": [ + 89, + 525, + 482, + 602 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Object detection-based baseline outperforms VLMs. We attempt the task with a strong object detection-based model to highlight that a standard counting approach will experience a greater loss going from unoccluded to occluded environments, as it cannot capture any occluded objects, i.e. cannot reason. We choose COUNTGD [3], the top solution for unoccluded counting on FSC-147, on which it was trained. Because we draw our images from FSC-147's train and test sets, and COUNTGD trains on FSC-147, we only evaluate COUNTGD on the subset of our data sourced from the FSC-147 test set, consisting of 149 images.", + "bbox": [ + 88, + 612, + 482, + 777 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We find that COUNTGD deteriorates by $7.19\\%$ on occluded images, increasing from $3.15\\%$ sMAPE to $10.34\\%$ as observed in Fig. 5. As expected, COUNTGD outperforms all VLMs on the unoccluded split as it is trained for counting on FSC-147. COUNTGD also outperforms the VLMs on the occluded split, reinforcing that only counting the visible objects is a hard-to-beat baseline. 
However, the drop in performance with occlusion is greater than the average", + "bbox": [ + 89, + 779, + 482, + 901 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/6852ecb13e232280415a379065d7456c9457cca7da26b7d7923a10788f9ddee8.jpg", + "image_caption": [ + "Figure 5. VLM vs. VLM + CountGD hybrid on questions from the CAPTURE $^{\\text{real}}$ (occluded split) that are not in COUNTGD training set. Metric: sMAPE (lower is better)." + ], + "image_footnote": [], + "bbox": [ + 517, + 305, + 898, + 397 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "VLM's drop, highlighting a disadvantage of non-reasoning solutions on CAPTURE: their error is necessarily tied directly to the number of occluded objects and they cannot address the task on their own, whereas a VLM might be able to infer missing objects via reasoning.", + "bbox": [ + 511, + 472, + 905, + 549 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Hybrid VLM counting systems improve performance. Finding that COUNTGD is far better at counting visible objects than VLMs, we leverage the advantage that COUNTGD has by feeding its visible object count information to the VLMs as part of the prompt. As expected, Fig. 5 illustrates that there is a considerable decrease in error when CountGD and the VLMs are combined. However, this hybrid system still performs worse than COUNTGD alone, indicating VLMs are still subpar even at counting just occluded objects (as further reinforced by Appendix C.3).", + "bbox": [ + 511, + 556, + 906, + 709 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. Effect of Data Factors on VLM Performance", + "text_level": 1, + "bbox": [ + 511, + 718, + 890, + 732 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Here, we use the CAPTURE $^{\\text{synthetic}}$ data (which can be controlled precisely and fully annotated) to examine which features correlate with model performance. 
We test the effect of the following variables on final performance: (1) Increasing the number of occluded objects; (2) Varying the pattern. We also investigate whether models can classify patterns, and to what degree models can predict the number of occluded objects only (rather than the total).", + "bbox": [ + 511, + 739, + 905, + 861 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Models perform worse when more dots are occluded. In Fig. 6 (right), we observe that error increases with re", + "bbox": [ + 511, + 869, + 905, + 900 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 924, + 503, + 935 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/d1ff88f8e06685dc0b485224cacfa612ed34e24c8db13ff474eb31b034de7999.jpg", + "image_caption": [ + "Figure 6. Effect of number of total objects in the image and number of occluded objects on sMAPE from $\\mathrm{CAPTURE}^{\\mathrm{synthetic}}$ (occluded split). Metric: sMAPE (lower is better)." + ], + "image_footnote": [], + "bbox": [ + 178, + 90, + 509, + 229 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/0aacafaba92370dae1f0e5b5f4e96b3961bd610b4f253bcf291566faf27d3a59.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 513, + 90, + 813, + 229 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/6dcb61ccb0400b9be87716c1ec763a1aee18b1ea03c0a515f3207f2846c30ff6.jpg", + "image_caption": [ + "Figure 7. Effect of pattern type in CAPTURE $^{\\text{synthetic}}$ (occluded split) on sMAPE. Metric: sMAPE (lower is better)." + ], + "image_footnote": [], + "bbox": [ + 112, + 290, + 455, + 448 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "spect to the number of occluded dots. However, Fig. 6 (left) also shows that performance is less affected by the total number of dots. This suggests that the task difficulty is more closely correlated with the difficulty of occlusion – i.e. 
the difficulty of the world modeling task – rather than the complexity of the pattern. Some models, such as GPT-4o, deviate from this trend, which has lower error on specific numbers. We further explore model bias in Appendix C.5.", + "bbox": [ + 89, + 502, + 483, + 625 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Performance depends on pattern type. The controllability of $\\mathrm{CAPTURE}^{\\mathrm{synthetic}}$ allows us to measure the effect of pattern type on performance. In Fig. 7, we find that model performance differs across shapes with some regularity: objects arranged in a circle generally have lower sMAPE than other shapes, across all models. Qwen2VL has an espe", + "bbox": [ + 89, + 633, + 483, + 724 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/ce5800214bd41b55746d8d1c7787aaa4f4fc6e766570fdd0f051c236d58c6c8e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelAccuracy (%) [↑]
Originalw/ Occlusion (Δ)
GPT-4o84.0078.52 (-5.48)
InternVL268.5247.48 (-21.04)
Molmo80.7065.22 (-15.48)
Qwen2VL88.3586.43 (-1.92)
Avg. of 4 VLMs80.3969.41 (-10.98)
", + "bbox": [ + 130, + 739, + 439, + 863 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 4. VLM accuracy in identifying the correct pattern in CAPTUREsynthetic. Metric: accuracy (higher is better).", + "bbox": [ + 89, + 875, + 482, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "cially large decrease in error when given circular arrangements compared to rectangles or triangles.", + "bbox": [ + 511, + 289, + 903, + 320 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Models can identify patterns. To determine how much model errors can be attributed to a lack of pattern recognition ability, we formulate a separate task where models must recognize the pattern in the image on CAPTURE $^{\\text{synthetic}}$ . Here, we frame the task as multiple-choice, asking the model to select from the pattern types available (rectangle, triangle, or circle). Table 4 illustrates that all perform substantially better than random at this task, with most models except InternVL2 achieving accuracy above $80\\%$ in the unoccluded setting. As expected, the patterns were easier to identify in unoccluded scenarios, with models suffering an average accuracy drop of $10.95\\%$ in the occluded setting. Notably, GPT-4o and Qwen2VL have a fairly small drop in performance, suggesting they can generally capture the pattern even in the presence of occlusion.", + "bbox": [ + 511, + 329, + 906, + 555 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3. Analysis with Auxiliary Information", + "text_level": 1, + "bbox": [ + 511, + 566, + 826, + 582 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In Sec. 4.1, we see that models broadly struggle with amodal counting. Here, we seek to disentangle whether this problem results from a failure to reason, the absence of a world model, or both by giving VLMs two different types of auxiliary information: oracle information and predicted information. 
Oracle information is ground truth and is directly pulled from CAPTURE's metadata, e.g., object locations. Predicted information generates new information from a completely separate model and gives it to the VLM. This information is not ground truth and is sourced from an external model, such as an image inpainting model, rather than the VLM. By giving the model auxiliary information in the form of reasoning and spatial clues, we can establish how much of each model's error results from an inability to handle occlusion rather than an inability to recognize and count visible objects.", + "bbox": [ + 511, + 588, + 906, + 830 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Oracle setup. We test two oracles for $\\mathsf{CAPTURE}^{\\mathsf{real}}$ 's occluded split based on its constituent subtasks: counting the visible objects and inferring/counting occluded objects. Both oracles provide the VLM with text-based coordinates", + "bbox": [ + 511, + 839, + 908, + 900 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 925, + 503, + 935 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/c9dcd23e293902294ad0bb304302126a7e2ee754cdd92911fd7e5aabcfc11d68.jpg", + "image_caption": [ + "With Occlusion", + "With Occlusion Prompt: Count the exact number of cans in the image. Assume the pattern of cans continues behind any black box. Provide the total number of cans as if the black box were not there.", + "Figure 8. Example image and text inputs for experiments with auxiliary information experiments (Sec. 4.3). Blue eyes indicate objects for which the All Object Coordinate Oracle or Visible Object Coordinate Oracle extracts coordinates. The brighter part of the image represents the area which Inpainting Pipeline fills in. Example prompts are shown in italics. Blue eye overlays and faded parts of images are for demonstration purposes and are not passed with the image." 
+ ], + "image_footnote": [], + "bbox": [ + 98, + 104, + 295, + 196 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/fdce1f292eb8c63e5d8886496c488350aad21c286ba6461c0abc5dfe7d1ccd31.jpg", + "image_caption": [ + "All Object Coordinate Oracle", + "(w/Oracle information) \nAll Object Coordinate Oracle Prompt: Count the exact number of cans in the image, including behind the black box... Coordinates of all cans: (59, 43), (219, 38), (356, 43), (522, 36), (63, 180), (73, 335), (214, 186), (379, 184), (524, 177), (220, 332), (372, 329), (525, 325)" + ], + "image_footnote": [], + "bbox": [ + 302, + 104, + 496, + 196 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d862b1fc3435e11a6f995250f084863846b2a1867e3e95f87cda3d508c3650df.jpg", + "image_caption": [ + "Visible Object Coordinate Oracle", + "(w/Oracle information) \nVisible Object Coordinate \nOracle Prompt: Count the exact number of cans in the image, including behind the black box... \nCoordinates of visible cans: (59, 43), (219, 38), (356, 43), (522, 36), (63, 180), (73, 335)" + ], + "image_footnote": [], + "bbox": [ + 500, + 104, + 699, + 196 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/8578bf7e10fea3496bee51ad25b496e2829784d6fda6ca668364777004884e8c.jpg", + "image_caption": [ + "Inpainting Pipeline", + "(w/ Predicted information) \nInpainting Pipeline Prompt: \nCount the exact number of cans in the image.", + "(Fading added only for emphasis to visualize infilling. Final image given to VLM is not faded)" + ], + "image_footnote": [], + "bbox": [ + 702, + 104, + 900, + 196 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/766d3a6289f8b6fdb33abb8032609a92274b1bc67ea432f698f4bdcbf6a1cf2d.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelOriginalw/ OcclusionOracle InformationPredicted Information
+ All Coordinates (Δ)+ Visible (Δ)+ Inpainting (Δ)
GPT-4o13.3414.752.93 (-11.82)9.20 (-5.55)15.89 (+1.14)
InternVL226.1732.9017.48 (-15.42)25.13 (-7.77)31.12 (-1.78)
Qwen2VL18.9629.339.62 (-19.71)17.70 (-11.63)22.64 (-6.69)
Avg. of 3 VLMs19.4925.6610.01 (-15.65)17.34 (-8.32)23.22 (-2.44)
", + "bbox": [ + 161, + 375, + 834, + 481 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 5. Effect of auxiliary information on occluded CAPTURE ${}^{\\text{real. }}\\Delta =$ (Auxiliary Information) - (w/ Occlusion). Metric: sMAPE.", + "bbox": [ + 99, + 492, + 892, + 506 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "of objects in the image, simplifying the visual task by assuming the VLM effectively has a perfect visual system that can recognize and localize objects in the image. The first oracle, the Visible Object Coordinate Oracle, gives the VLM the coordinates of all unoccluded objects (encoded as text, as seen in Fig. 8) and instructs the model to estimate the number of occluded objects, count the number of visible object coordinates, and add the two. In other words, the model is given oracle information about what objects are visible, thus also revealing key information about the pattern. The second oracle, the All Object Coordinate Oracle, instead gives the model the coordinates of all objects. Here, the model only needs to count the coordinates in the prompt, eliminating the need to reason on the visual input. Note that Molmo is excluded in these tests because it contains a prompt limit that would truncate the list of coordinates. An example of the oracle inputs can be seen in Fig. 8.", + "bbox": [ + 88, + 532, + 485, + 790 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Prediction setup. In this setting, we provide the VLM with an external world model representation predicted by another model. Specifically, we develop the Inpainting Pipeline to fill in the occluded region via a diffusion-based inpainting model and pass the inpainted image to the VLMs. For the inpainting model, we choose FLUX.1-Fill [dev],", + "bbox": [ + 89, + 809, + 483, + 901 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "whose backbone FLUX.1 [dev] [21] is a top public model in the Text to Image Model Arena [7]. 
An example input to the VLM can be seen on the far-right of Fig. 8.", + "bbox": [ + 511, + 532, + 906, + 580 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Providing visible or all object coordinates improves performance substantially. The results in Tab. 2 indicate that models struggle on CAPTURE, which requires identifying a pattern and counting both visible and occluded objects. Moreover, models generally struggle with counting even in unoccluded settings. Both oracles simplify the counting task: All Object Coordinate Oracle reduces the task to simply counting coordinates with no reasoning involved, and Visible Object Coordinate Oracle similarly simplifies the task for visible objects, while still requiring inferring occluded objects. Additionally, under Visible Object Coordinate Oracle, recognizing the pattern shifts from a visual reasoning task to an augmented math problem. Instead of visually reasoning about where objects are located, the VLM considers what patterns the coordinates could make. Translating this task into a text problem results in an average increase of $15\\%$ with all objects coordinate oracle; the errors LLMs make here are due to an inability to count in the text prompt, as opposed to weaknesses in handling occlusion (since all object coordinates are given), and the strongest", + "bbox": [ + 511, + 598, + 908, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 924, + 504, + 935 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "model, GPT-4o, achieves minimal error here. We also obtain an average increase of $8\\%$ with the visible objects coordinate oracle (shown in Tab. 5), possibly because it allows the more powerful LLM backbone (which is far larger than the vision model in all models tested) to complete the counting task. 
Taken together, these results suggest that there is much room for improvement in visual world modeling beyond text-based reasoning of VLMs.", + "bbox": [ + 93, + 90, + 480, + 210 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Providing diffusion-based inpainting improves performance marginally. Similar to the object coordinate oracles, the Inpainting Pipeline (rightmost columns in Fig. 8 and Tab. 5) eliminates the need for world modeling and provides VLMs with an approximation of the image behind the occluder. With the inpainted images, VLM error decreases by almost $2\\%$ for InternVL2 and $7\\%$ for Qwen2VL compared to the original occluded images. GPT-4o's error increases on inpainted images by a small margin; we hypothesize that this may be because GPT-4o has one of the better world models (based on its superior performance), and thus does not improve further with the inpainted images. Moreover, every VLM still falls short of its unoccluded image performance, indicating that the diffusion model is not a perfect world model. Qualitatively, we find that the inpainting model sometimes fails to output the correct pattern.", + "bbox": [ + 93, + 223, + 480, + 463 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5. Related Work", + "text_level": 1, + "bbox": [ + 94, + 483, + 228, + 496 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Spatial reasoning in visual question answering. Past work measures the spatial reasoning capabilities of VLMs in the form of visual question answering (VQA) [4, 16] benchmarks. SpartQA [26] asks VLMs to identify the spatial relation (e.g., above, behind, left of) between objects in synthetically created 2D images from NLVR [39]. More recent benchmarks test similar spatial relation understanding with real images [2, 24, 36]. 
While this past work asks models to provide a text description for a relation between two fully observed objects, CAPTURE measures the world modeling from a partially observed scene, thus requiring the handling of occlusion, pattern recognition, and counting. Together, these constitute a stricter test of spatial reasoning than typical VQA settings.", + "bbox": [ + 93, + 512, + 480, + 723 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Amodal completion. Occlusions are common in natural scenes, and vision solutions for amodal completion have made significant progress in infilling occlusions [6, 38, 46]. The amodal completion task has evolved from simply completing a shape to filling in appearance (e.g., texture, color, etc.) to finally dealing with fine-grained order perception (multiple stacked occluded objects) [5]. Specifically in Qiu and Di [34], VLMs classify the hidden objects and extract fine details from occluded items. CAPTURE, however, presents a unique category of patterned amodal counting which requires inferring fully occluded objects based on a", + "bbox": [ + 93, + 734, + 480, + 898 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "pattern rather than inferring occluded object wholes based on object parts. In other words, previous work has only attempted tasks that require amodal completion for one object at a time [31, 38, 46], whereas CAPTURE handles multiple objects. Multi-object amodal completion is crucial because in cluttered scenes, entire groups of objects are often occluded. Moreover, the output space of CAPTURE is language (rather than filling pixels).", + "bbox": [ + 516, + 90, + 903, + 210 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Counting with vision-and-language models. Within the task of counting, the most similar application to CAPTURE is dense counting, where the objects to be counted occlude each other. 
There are many practical applications of such a task, like counting cells on a crowded slide [8], determining crop yields from densely-packed fields [43], or crowd counting [14, 44, 48]. Liang et al. [23] improved crowd counting with an augmented CLIP [35], i.e. also using VLMs for counting. Additionally, Jenkins et al. [18] introduced an amodal counting benchmark, presenting an occluded 3D counting task where models must count objects on retail shelves. However, our work differs in many ways, as Jenkins et al. [18] only counts retail shelves and uses Li-DAR input. More broadly, dense counting focuses on overlapping objects rather than on counting objects arranged into patterns, which is the focus of CAPTURE.", + "bbox": [ + 516, + 220, + 903, + 460 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 516, + 476, + 630, + 489 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We introduced CAPTURE, a novel benchmark for amodal counting that measures spatial reasoning capabilities under occlusion. CAPTURE is designed to assess VLMs' ability to form a robust world model and use that model for visual reasoning skills under occlusion. By testing counting, we cast the problem as a measurable task with an objective correct answer that also has real-world utility as VLMs become more broadly adopted. Our results suggest that VLMs struggle to combine reasoning, counting, and world modeling with low performance on occluded and unoccluded images. 
Our analysis indicates that models improve with oracle information about visible objects (simplifying the reasoning/counting tasks) and predicted information about the occluded objects (also simplifying world modeling), pointing to directions of model improvement.", + "bbox": [ + 516, + 500, + 903, + 726 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 516, + 739, + 669, + 755 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This work was supported by DARPA ECOLE Program No. HR00112390060, NSF-CAREER Award 1846185, NSF-AI Engage Institute DRL-2112635, DARPA Machine Commonsense (MCS) Grant N66001-19-2-4031, ARO Award W911NF2110220, ONR Grant N00014-23-1-2356, Microsoft Accelerate Foundation Models Research (AFMR) grant program, and a Bloomberg Data Science PhD Fellowship. The views contained in this article are those of the authors and not of the funding agency.", + "bbox": [ + 516, + 763, + 903, + 898 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 925, + 503, + 935 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 89, + 187, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] AI@Meta. Llama 3.1 model card. *Github Model Card*, 2024. 4", + "[2] Haider Al-Tahan, Quentin Garrido, Randall Balestriero, Diane Bouchacourt, Caner Hazirbas, and Mark Ibrahim. Unibench: Visual reasoning requires rethinking vision-language beyond scaling. arXiv preprint arXiv:2408.04810, 2024. 8", + "[3] Niki Amini-Naeni, Tengda Han, and Andrew Zisserman. Countgd: Multi-modal open-world counting. arXiv preprint arXiv:2407.04619, 2024. 2, 4, 5", + "[4] Stanislaw Antol, Aishwarya Agrawal, Jiasen Lu, Margaret Mitchell, Dhruv Batra, C Lawrence Zitnick, and Devi Parikh. Vqa: Visual question answering. 
In Proceedings of the IEEE international conference on computer vision, pages 2425-2433, 2015. 8", + "[5] Jiayang Ao, Qiuhong Ke, and Krista A Ehinger. Image amodal completion: A survey. Computer Vision and Image Understanding, 229:103661, 2023. 1, 8", + "[6] Jiayang Ao, Yanbei Jiang, Qiuhong Ke, and Krista A Ehinger. Open-world amodal appearance completion. arXiv preprint arXiv:2411.13019, 2024. 8", + "[7] Artificial Analysis. Text to image model arena, 2025. Accessed: April 10, 2025. 7", + "[8] Soumen Bera. Partially occluded object detection and counting. In Proceedings of the 2015 Third International Conference on Computer, Communication, Control and Information Technology (C3IT), pages 1-6. IEEE, 2015. 8", + "[9] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, Bin Li, Ping Luo, Tong Lu, Yu Qiao, and Jifeng Dai. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. arXiv preprint arXiv:2312.14238, 2023. 4", + "[10] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 4", + "[11] Davide Chicco, Matthijs J Warrens, and Giuseppe Jurman. The coefficient of determination r-squared is more informative than smape, mae, mape,mse and rmse in regression analysis evaluation. Peerj computer science, 7:e623, 2021. 11", + "[12] Nikolas Coupland. How frequent are numbers? Language & Communication, 31(1):27-37, 2011. 13", + "[13] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, et al. Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv preprint arXiv:2409.17146, 2024. 
4", + "[14] Zheyi Fan, Zihao Song, Di Wu, and Yixuan Zhu. Multibranch segmentation-guided attention network for crowd counting. Journal of Visual Communication and Image Representation, 97:103964, 2023. 8", + "[15] Benito E Flores. A pragmatic view of accuracy measurement in forecasting. Omega, 14(2):93-98, 1986. 11" + ], + "bbox": [ + 93, + 114, + 482, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[16] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6904-6913, 2017. 8", + "[17] David Ha and Jürgen Schmidhuber. Recurrent world models facilitate policy evolution. Advances in neural information processing systems, 31, 2018. 1", + "[18] Porter Jenkins, Kyle Armstrong, Stephen Nelson, Siddhesh Gotad, J Stockton Jenkins, Wade Wilkey, and Tanner Watts. Countnet3d: A 3d computer vision approach to infer counts of occluded objects. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3008-3017, 2023. 8", + "[19] Gaetano Kanizsa, Paolo Legrenzi, and Paolo Bozzi. Organization in vision: essays on gestalt perception. Praeger, 1979. 1", + "[20] Kaleb Kassaw, Francesco Luzi, Leslie M Collins, and Jordan M Malof. Are deep learning models robust to partial object occlusion in visual recognition tasks? arXiv preprint arXiv:2409.10775, 2024. 2", + "[21] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024.7", + "[22] Baiqi Li, Zhiqiu Lin, Wenxuan Peng, Jean de Dieu Nyandwi, Daniel Jiang, Zixian Ma, Simran Khanuja, Ranjay Krishna, Graham Neubig, and Deva Ramanan. Naturalbench: Evaluating vision-language models on natural adversarial samples. arXiv preprint arXiv:2410.14669, 2024. 
2", + "[23] Dingkang Liang, Jiahao Xie, Zhikang Zou, Xiaqing Ye, Wei Xu, and Xiang Bai. Crowdclip: Unsupervised crowd counting via vision-language model. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2893-2903, 2023. 8", + "[24] Fangyu Liu, Guy Edward Toh Emerson, and Nigel Collier. Visual spatial reasoning. Transactions of the Association for Computational Linguistics, 2023. 8", + "[25] Baraka Jacob Maiseli. Optimum design of chamfer masks using symmetric mean absolute percentage error. EURASIP Journal on Image and Video Processing, 2019(1):74, 2019. 11", + "[26] Roshanak Mirzaee and Hossein Rajaby. Spartqa: A textual question answering benchmark for spatial reasoning. In The 2021 Annual Conference of the North American Chapter of the Association for Computational Linguistics (NAACL-2021), 2021. 8", + "[27] Ingrid R Olson, J Christopher Gatenby, Hoi-Chung Leung, Pawel Skudlarski, and John C Gore. Neuronal representation of occluded objects in the human brain. Neuropsychologia, 42(1):95-104, 2004. 1, 2", + "[28] OpenAI. Hello gpt-4o, 2024. 4", + "[29] OpenCompass Team. Openvlm leaderboard. https://huggingface.co/spaces/opencompass/open_vlmleaderboard, 2024. Accessed: 2024-11-13. 4", + "[30] Yumiko OTSUKA, So KANAZAWA, and Masami K YAMAGUCHI. Development of modal and amodal completion in infants. Perception (London. Print), 35(9):1251-1264, 2006. 1, 2" + ], + "bbox": [ + 516, + 92, + 906, + 898 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 924, + 504, + 936 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[31] Ege Ozguroglu, Ruoshi Liu, Dídac Surís, Dian Chen, Achal Dave, Pavel Tokmakov, and Carl Vondrick. pix2gestalt: Amodal segmentation by synthesizing wholes. In 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3931-3940. IEEE Computer Society, 2024. 
8", + "[32] Max Peeperkorn, Tom Kouwenhoven, Dan Brown, and Anna Jordanous. Is temperature the creativity parameter of large language models? arXiv preprint arXiv:2405.00492, 2024. 12", + "[33] Muhammad Fetrat Qharabagh, Mohammadreza Ghofrani, and Kimon Fountoulakis. Lvlm-count: Enhancing the counting ability of large vision-language models. arXiv preprint arXiv:2412.00686, 2024. 2", + "[34] Wenmo Qiu and Xinhan Di. Occ-mlm: Empowering multimodal large language model for the understanding of occluded objects. arXiv preprint arXiv:2410.01261, 2024. 8", + "[35] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 8", + "[36] Navid Rajabi and Jana Kosecka. Gsr-bench: A benchmark for grounded spatial reasoning evaluation via multimodal llms. arXiv preprint arXiv:2406.13246, 2024. 8", + "[37] Viresh Ranjan, Udbhav Sharma, Thu Nguyen, and Minh Hoai. Learning to count everything. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3394-3403, 2021. 3", + "[38] Kaziwa Saleh, Sándor Szenási, and Zoltán Vámossy. Mask guided gated convolution for amodal content completion. In 2024 IEEE 22nd Jubilee International Symposium on Intelligent Systems and Informatics (SISY), pages 000321-000326. IEEE, 2024. 8", + "[39] Alane Suhr, Mike Lewis, James Yeh, and Yoav Artzi. A corpus of natural language for visual reasoning. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 217-223, Vancouver, Canada, 2017. Association for Computational Linguistics. 
8", + "[40] Kimi Team, Angang Du, Bohong Yin, Bowei Xing, Bowen Qu, Bowen Wang, Cheng Chen, Chenlin Zhang, Chenzhuang Du, Chu Wei, Congcong Wang, Dehao Zhang, Dikang Du, Dongliang Wang, Enming Yuan, Enzhe Lu, Fang Li, Flood Sung, Guangda Wei, Guokun Lai, Han Zhu, Hao Ding, Hao Hu, Hao Yang, Hao Zhang, Haoning Wu, Haotian Yao, Haoyu Lu, Heng Wang, Hongcheng Gao, Huabin Zheng, Jiaming Li, Jianlin Su, Jianzhou Wang, Jiaqi Deng, Jiezhong Qiu, Jin Xie, Jinhong Wang, Jingyuan Liu, Junjie Yan, Kun Ouyang, Liang Chen, Lin Sui, Longhui Yu, Mengfan Dong, Mengnan Dong, Nuo Xu, Pengyu Cheng, Qizheng Gu, Runjie Zhou, Shaowei Liu, Sihan Cao, Tao Yu, Tianhui Song, Tongtong Bai, Wei Song, Weiran He, Weixiao Huang, Weixin Xu, Xiaokun Yuan, Xingcheng Yao, Xingzhe Wu, Xinxing Zu, Xinyu Zhou, Xinyuan Wang, Y. Charles, Yan Zhong, Yang Li, Yangyang Hu, Yanru Chen, Yejie Wang, Yibo Liu, Yibo Miao, Yidao Qin, Yimin Chen" + ], + "bbox": [ + 91, + 92, + 480, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yiping Bao, Yiqin Wang, Yongsheng Kang, Yuanxin Liu, Yulun Du, Yuxin Wu, Yuzhi Wang, Yuzi Yan, Zaida Zhou, Zhaowei Li, Zhejun Jiang, Zheng Zhang, Zhilin Yang, Zhiqi Huang, Zihao Huang, Zijia Zhao, and Ziwei Chen. Kimi-VL technical report, 2025. 4", + "[41] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 4", + "[42] Wei-Yao Wang, Zhao Wang, Helen Suzuki, and Yoshiyuki Kobayashi. Seeing is understanding: Unlocking causal attention into modality-mutual attention for multimodal llms. arXiv preprint arXiv:2503.02597, 2025. 2", + "[43] Yiding Wang, Yuxin Qin, and Jiali Cui. 
Occlusion robust wheat ear counting algorithm based on deep learning. Frontiers in Plant Science, 12:645899, 2021. 8", + "[44] Yongjie Wang, Feng Wang, and Dongyang Huang. Dual-branch counting method for dense crowd based on self-attention mechanism. Expert Systems with Applications, 236:121272, 2024. 8", + "[45] Karen Wynn. Children's understanding of counting. Cognition, 36(2):155-193, 1990. 1, 2", + "[46] Katherine Xu, Lingzhi Zhang, and Jianbo Shi. Amodal completion via progressive mixed context diffusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9099-9109, 2024. 8", + "[47] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, et al. Minicpm-v: A gpt-4v level mllm on your phone. arXiv preprint arXiv:2408.01800, 2024. 4", + "[48] Lifang Zhou, Songlin Rao, Weisheng Li, Bo Hu, and Bo Sun. Multi-branch progressive embedding network for crowd counting. Image and Vision Computing, page 105140, 2024. 8" + ], + "bbox": [ + 516, + 92, + 903, + 613 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Appendix", + "text_level": 1, + "bbox": [ + 91, + 90, + 179, + 107 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "A. Implementation Details", + "text_level": 1, + "bbox": [ + 91, + 116, + 316, + 133 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "A.1. Metric Details", + "text_level": 1, + "bbox": [ + 91, + 141, + 241, + 157 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We use symmetric mean percent error (sMAPE) as the primary metric for our benchmarks due to its resistance to bias for under/over predictions and small/large ground truths [25]. The standard metric for a counting benchmark is mean average error (MAE). 
MAE is popular, but heavily penalizes predictions that deviate by a small margin from big ground truths, highlighting the necessity for a metric that gives equal weighting to all questions. Mean average percent error (MAPE) initially seems appealing but is disproportionately inflated for small ground truths and is biased towards overpredictions. Mean square error (MSE) and root mean square error (RMSE) are also commonly used but are very sensitive to outliers because they square the error. Intuitively, performing well on almost all questions and poorly on a small subset should score better than consistently being wrong. Among commonly-used metrics, sMAPE is the only metric that evaluates performance in relation to the distribution of ground truth elements [11]. There are two common definitions [15] for sMAPE, but we use the one that scales to $100\\%$ . sMAPE is given by:", + "bbox": [ + 89, + 162, + 483, + 467 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {s M A P E} = 1 0 0 \\cdot \\frac {1}{n} \\sum_ {i = 1} ^ {n} \\frac {\\left| y _ {i} - \\hat {y} _ {i} \\right|}{\\left| y _ {i} \\right| + \\left| \\hat {y} _ {i} \\right|} \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 171, + 479, + 482, + 517 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "where $y_{i}$ represents the actual values, $\\hat{y}_i$ represents the predicted values, and $n$ is the number of observations. sMAPE is capped at $100\\%$ , providing a finite scoring range. This feature is ideal for challenging tasks like ours, as it penalizes model responses that fail to produce an answer.", + "bbox": [ + 89, + 525, + 483, + 602 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "A.2. Output Tokens", + "text_level": 1, + "bbox": [ + 89, + 611, + 246, + 628 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "To maximize the VLM's chance at success, we allocate a high number of output tokens to generate a rationale and output. 
This varies per model. We give 4000 tokens to InternVL2, 2000 tokens to Molmo, and 8192 tokens to Qwen2VL, following their max output lengths. For GPT-40, we use the default of 4096 tokens.", + "bbox": [ + 89, + 633, + 483, + 724 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "B. CAPTURE Dataset Creation Details", + "text_level": 1, + "bbox": [ + 89, + 738, + 426, + 755 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The following expands upon Sec. 2.2. While FSC-147, a diverse counting dataset with manual annotations, is a strong starting point, it cannot immediately be adapted to our task. To make the task of amodal counting solvable, our dataset requires images with patterns in them. A person (or model) can infer how the pattern would continue and thus accurately predict the total number. For questions to be answerable, the dataset's images must be filtered down to represent patterns a model or person could recognize.", + "bbox": [ + 89, + 763, + 482, + 902 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Our filtering process follows two stages. First, we prompt GPT-4o to determine whether the objects were arranged in a pattern. Second, if the model responded with \"no\", the images were immediately discarded. If the model output was \"yes\", the log probability of the token is stored. Empirically, we found that higher log probability values (i.e. higher confidence scores) corresponded to more well-defined patterns in the image. Thus, we use the log probabilities for filtering.", + "bbox": [ + 511, + 90, + 903, + 227 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Specifically, let $P_{\\mathrm{yes}}$ be the log probability of the \"yes\" token and $T$ denote the threshold for determining how well-defined a pattern is. To filter the images based on pattern rigidity, we apply the following condition: $e^{P_{\\mathrm{yes}}} \\geq T$ . This inequality yields 991 images from the original dataset (16.12%). 
Next, we manually filter each of the selected images to ensure that they indeed contain patterns and feature a countable number of objects, excluding 34 images. Afterward, we manually place a \"fair\" occluding box in each image, i.e. a box that leaves sufficient portions of the pattern visible, such that the pattern can still be inferred from the unoccluded portions of the image. Occluding boxes were also chosen with varying positions and sizes in the image.", + "bbox": [ + 511, + 234, + 906, + 431 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "C. Additional Analysis", + "text_level": 1, + "bbox": [ + 513, + 467, + 709, + 484 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Here we provide additional experiments that attempt to either increase model performance on CAPTURE or dissect the reasons behind poor model performance. Chain-of-Thought inhibits model performance, while temperature backoff slightly improves performance. Additionally, we find that models struggle at counting just occluded objects, are overconfident in occluded settings, and are biased to predict specific numbers.", + "bbox": [ + 511, + 500, + 905, + 623 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "C.1. Chain-of-Thought reduces model performance", + "text_level": 1, + "bbox": [ + 511, + 652, + 906, + 671 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/8414fceefb0333f2f72e9353299e00198f69eb76a39b1a315c06104d0376da7a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodCAPTURErealCAPTUREsynthetic
GPT-4o14.759.71
GPT-4o w/ CoT14.947.73
Qwen229.3311.74
Qwen2 w/ CoT31.5737.81
", + "bbox": [ + 553, + 702, + 862, + 777 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Table 6. CoT experiments (metric: sMAPE).", + "bbox": [ + 575, + 779, + 841, + 792 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "During development, we experimented with several common strategies including CoT. In Tab. 6, we find that CoT reduces model performance except in the occluded synthetic scenario, most likely because the included examples are very similar to the test prompt.", + "bbox": [ + 511, + 825, + 905, + 902 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/7ad2418d050b057c9166898787a612277b8a7a6e4806fefeed67729879f8a3f8.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelError (%) (↓)
RealSynthetic
UnoccludedOccludedUnoccludedOccluded
Originalw/ backoff (Δ)Originalw/ backoff (Δ)Originalw/ backoff (Δ)Originalw/ backoff (Δ)
GPT-4o13.3412.57 (−0.77)14.7514.39 (−0.36)5.905.93 (+0.03)9.719.23 (−0.48)
InternVL226.1727.09 (+0.92)32.9032.37 (−0.53)16.4415.59 (−0.85)17.5716.24 (−1.33)
Molmo25.9021.23 (−4.67)32.4928.17 (−4.32)8.402.88 (−5.52)17.7315.85 (−1.88)
Qwen2VL18.9619.40 (+0.44)29.3328.47 (−0.86)6.636.66 (+0.03)11.7411.51 (−0.23)
Avg. of 4 VLMs21.0920.07 (−1.02)27.3725.85 (−1.52)9.347.76 (−1.58)14.1913.21 (−0.98)
", + "bbox": [ + 91, + 88, + 903, + 250 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "C.2. Temperature backoff slightly improves model performance", + "text_level": 1, + "bbox": [ + 89, + 327, + 482, + 359 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "To improve VLM performance on CAPTURE, we address a trend we established during early testing. Most of the time, the VLM fails by reaching an incorrect answer. Sometimes, however, our benchmark can cause VLMs to produce a long and irrelevant response that strays from the original prompt, leading to the worst possible sMAPE score (100%).", + "bbox": [ + 89, + 367, + 482, + 460 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "To reduce the number of skipped questions, we experiment with temperature backoff, which iteratively decreases the sampling temperature. Because the answer extractor can immediately identify an incoherent output, we can regenerate the response with a lower temperature to get the model to answer the task properly. Consistent with our findings, Peeperkorn et al. [32] also finds that lower temperatures increase coherence in VLMs, thereby enhancing their chances of maintaining relevance to the prompt. Therefore, temperature backoff gives VLMs a better chance of achieving higher scores. Each time the answer extractor returns an empty answer because the VLMs produced an incoherent answer, we reduce the temperature by 0.1 (starting from 1.0) until it reaches 0.0, at which point the example is skipped.", + "bbox": [ + 88, + 460, + 482, + 689 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Models perform slightly better with temperature backoff. We introduced temperature backoff to reduce model incoherence, and it performed fairly well. As shown in Tab. 
7 (bottom), this method slightly improves performance across each model, resulting in an average error reduction of $5.78\\%$ in $\\mathrm{CAPTURE}^{\\mathrm{real}}$ and $5.45\\%$ in $\\mathrm{CAPTURE}^{\\mathrm{synthetic}}$ . Temperature backoff essentially allows the model to reattempt the question if it fails to respond to the prompt. Similar to previous results, positive results from reattempts highlight VLMs' weak reasoning abilities.", + "bbox": [ + 89, + 700, + 482, + 853 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/a0b1d18c5608668c32ad94cfb339eefcb95710ae93d724ddeebaa909da2aae77.jpg", + "table_caption": [ + "Table 7. Comparison of models on CAPTURE across four scenarios (CAPTURE $^{\\text{real}}$ vs. CAPTURE $^{\\text{synthetic}}$ , Unoccluded vs. Occluded). \"Original\" indicates no backoff; \"w/ backoff\" indicates applying backoff, with $\\Delta = (w/ backoff) - (Original)$ . Negative $\\Delta$ values indicate an improvement." + ], + "table_footnote": [], + "table_body": "
ModelError (%) [↓]
All ObjectsOnly Occluded
GPT-4o14.7526.13 (+11.38)
InternVL232.9075.82 (+42.92)
Molmo32.4996.79 (+64.30)
Qwen2VL29.3332.89 (+3.56)
Avg. of 4 VLMs27.3757.91 (+30.54)
", + "bbox": [ + 537, + 324, + 883, + 460 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Table 8. VLM sMAPE for counting all objects and counting only the occluded objects in CAPTURE $^{\\text{real}}$ . Metric: sMAPE (lower is better).", + "bbox": [ + 511, + 470, + 906, + 513 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "C.3. Models struggle at counting just occluded objects", + "text_level": 1, + "bbox": [ + 511, + 540, + 906, + 571 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We separately test whether models can count only the occluded objects (not including the visible objects) in an image. Here, as Tab. 8 demonstrates, the models perform especially poorly in this task, with high error rates across all models. Therefore, we can conclude that occlusion and counting are uniquely difficult for the VLMs, and that the drop in performance between unoccluded and occluded settings in Tab. 2 is likely due to a poor ability to count occluded objects.", + "bbox": [ + 509, + 578, + 906, + 715 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "C.4. Models are overconfident in occluded settings", + "text_level": 1, + "bbox": [ + 511, + 726, + 901, + 742 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We test the uncertainty with two different methods of obtaining confidence on Qwen2VL. In the first method, we prompt Qwen2VL for its confidence in the answer. For the second method, we generate 20 responses for every question in our VQA and calculate the confidence as the percentage of times the most common answer was generated. These results can be seen in Fig. 9 and Fig. 10 respectively. In both reliability curves, there is a slight trend that the model's confidence is negatively correlated with the error, which is the desired outcome. 
In $\\mathrm{CAPTURE}^{\\mathrm{real}}$ , how-", + "bbox": [ + 509, + 750, + 906, + 900 + ], + "page_idx": 11 + }, + { + "type": "page_footnote", + "text": "2We set $T = 0.9999$ based on manual evaluation, finding it resulted in fewer false positives.", + "bbox": [ + 89, + 875, + 482, + 900 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "ever, the correlation is much stronger. While the models are somewhat calibrated (with generally lower confidence on higher-error examples, there are still outliers in prompted confidence for CAPTURE $^{\\text{real}}$ occluded and sampled confidence for CAPTURE $^{\\text{synthetic}}$ occluded. This indicates that not only do the models perform worse under occlusion, but they can also be overconfident.", + "bbox": [ + 89, + 90, + 483, + 196 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/c611c7dddf48809c8a6e0d57ebe548a151fb1cc3df349e754f8ed43ae455fa99.jpg", + "image_caption": [ + "Figure 9. Reliability curve of prompting model for confidence vs. sMAPE." + ], + "image_footnote": [], + "bbox": [ + 112, + 236, + 444, + 431 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/12bebc6d15e6500bbabf290ea88f67842250f7147b933634b3e9da5cb0518d18.jpg", + "image_caption": [ + "Figure 10. Reliability curve of sampling model for confidence vs. sMAPE." + ], + "image_footnote": [], + "bbox": [ + 112, + 532, + 444, + 729 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "C.5. Models are biased to predict specific numbers.", + "text_level": 1, + "bbox": [ + 89, + 801, + 482, + 818 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To examine where models frequently err, we generated a confusion matrix for every model based on CAPTURE $^{\\text{synthetic}}$ results (shown in Appendix C.5). 
The y-axis represents the ground truth values and the x-axis represents the model's answers. We find that models often over-predict", + "bbox": [ + 89, + 824, + 483, + 900 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "numbers associated with common counts in real life: GPT-40 tends to predict numbers like 8, 9, 10, and 12, which are all non-prime numbers (i.e. can be arranged into a grid) and common groupings of objects. For example, 12 is a common grouping (dozens) and allows arrangements into 3x4 or 2x6 grids. InternVL and Qwen2VL over-predict 5 and 10, aligning with how humans conceptualize numbers. Indeed, Coupland [12] found that numbers 5, 10, 20, and other round numbers appear disproportionately more in online texts. Molmo has no correlation with these factors, possibly due to its unique \"point and count\" ability.", + "bbox": [ + 511, + 90, + 906, + 258 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "D. VLM Prompts", + "text_level": 1, + "bbox": [ + 513, + 270, + 666, + 287 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We use a 100-example validation set for each setting to select the best prompt, which we report below.", + "bbox": [ + 511, + 295, + 906, + 325 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Prompt for GPT-4o on CAPTURE $^{\\text{real}}$ unoccluded split.", + "text_level": 1, + "bbox": [ + 529, + 339, + 888, + 371 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Count the exact number of [object] in the image. Assume the pattern of [object] continues behind any black box. Provide the total number of [object] as if the black box were not there.", + "bbox": [ + 529, + 378, + 888, + 439 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Prompt for InternVL2 on CAPTURE $^{\\text{real}}$ unoccluded split.", + "text_level": 1, + "bbox": [ + 531, + 467, + 888, + 498 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Your task is to count objects in the image. 
First, state what the pattern is, then give your final count.", + "bbox": [ + 531, + 506, + 888, + 537 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Prompt for Molmo on CAPTURE $^{\\text{real}}$ unoccluded split.", + "text_level": 1, + "bbox": [ + 531, + 566, + 887, + 598 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Count the exact number of [object] in the image. Only count [object] that are visible within the frame. If [object] are partially in the frame (i.e. if any part of [object] are visible), count it.", + "bbox": [ + 529, + 606, + 888, + 667 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Prompt for Qwen2VL on CAPTURE $^{\\text{real}}$ unoccluded split.", + "text_level": 1, + "bbox": [ + 531, + 696, + 888, + 729 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Count the exact number of [object] in the image. Assume the pattern of [object] continues behind any black box. Provide the total number of [object] as if the black box were not there. Only count [object] that are visible within the frame (or would be visible without the occluding box). If [object] are partially in the frame (i.e. if any part of [object] are visible), count it. 
If the [object] would be partially in the frame without the occluding box, count it.", + "bbox": [ + 529, + 736, + 888, + 872 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/08f6c9e3444d44c81dce730c5794b8ed561370ede8d77fbdb08e1d8da80f2eaa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 91, + 87, + 495, + 345 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/bc42a00ebe464d45c0fda199fa3866d224671941005433a1ffed10ada4f7fc65.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 498, + 88, + 901, + 345 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/b764ec1f7d7b2599e57b7f0c6a90c64e02b3a7d1446773a3ab1bb37a3fede70d.jpg", + "image_caption": [ + "Figure 11. Confusion matrix: predicted vs. ground truth counts for CAPTURE $^{\\text{real}}$ s occluded split." + ], + "image_footnote": [], + "bbox": [ + 89, + 349, + 493, + 609 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/6bbc087c1a75124b22630e02712bd7e1f1c4ff2a89de05e3a88dda5c6c31f81a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 349, + 901, + 609 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Prompt for GPT-4o, InternVL2, and Qwen2VL on CAPTURE $^{\\text{real}}$ occluded split.", + "text_level": 1, + "bbox": [ + 109, + 666, + 465, + 698 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Count the exact number of [object] in the image. Assume the pattern of [object] continues behind any black box. Provide the total number of [object] as if the black box were not there. Only count [object] that are visible within the frame (or would be visible without the occluding box). If [object] are partially in the frame (i.e. if any part of [object] are visible), count it. If the [object] would be partially in the frame without the occluding box, count it. 
Molmo: Your task is to count objects in the image. Assume the pattern of [object] continues behind the black box. First, state what the pattern is, then give your final count.", + "bbox": [ + 107, + 705, + 467, + 888 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Prompt for Molmo on CAPTURE $^{\\text{real}}$ occluded split.", + "text_level": 1, + "bbox": [ + 531, + 667, + 887, + 700 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Your task is to count objects in the image. Assume the pattern of [object] continues behind the black box. First, state what the pattern is, then give your final count.", + "bbox": [ + 529, + 707, + 888, + 767 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Prompt for GPT-4o on CAPTUREsynthetic unoccluded split.", + "text_level": 1, + "bbox": [ + 531, + 806, + 888, + 837 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Your task is to count objects in the image. First, state what the pattern is, then give your final count.", + "bbox": [ + 531, + 845, + 887, + 876 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 924, + 508, + 936 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Prompt for InternVL2 on CAPTUREsynthetic unoccluded split.", + "text_level": 1, + "bbox": [ + 107, + 95, + 464, + 127 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Count the exact number of [dot shape]s in the image. Only count [dot shape]s that are visible within the frame. If [dot shape]s are partially in the frame (i.e. if any part of [dot shape]s are visible), count it.", + "bbox": [ + 107, + 135, + 464, + 196 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Prompt for Molmo on CAPTUREsynthetic unoccluded split.", + "text_level": 1, + "bbox": [ + 107, + 227, + 464, + 258 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Count the exact number of [dot shape]s in the image. 
Only count [dot shape]s that are visible within the frame.", + "bbox": [ + 107, + 267, + 464, + 311 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Prompt for Qwen2VL on CAPTUREsynthetic unoccluded split.", + "text_level": 1, + "bbox": [ + 107, + 340, + 464, + 372 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Count the exact number of [dot shape]s in the image. Assume the pattern of [dot shape]s continues behind any black box. Provide the total number of [dot shape]s as if the black box were not there. Only count [dot shape]s that are visible within the frame (or would be visible without the occluding box). If [dot shape]s are partially in the frame (i.e. if any part of [dot shape]s are visible), count it. If the [dot shape]s would be partially in the frame without the occluding box, count it.", + "bbox": [ + 107, + 381, + 465, + 531 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Prompt for GPT-4o and Molmo on CAP-TUREsynthetic occluded split.", + "text_level": 1, + "bbox": [ + 107, + 561, + 464, + 592 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Your task is to count objects in the image. Assume the pattern of [dot shape]s continues behind the black box. First, state what the pattern is, then give your final count.", + "bbox": [ + 107, + 601, + 464, + 659 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Prompt for InternVL2 and Qwen2VL on CAPTUREsynthetic occluded split.", + "text_level": 1, + "bbox": [ + 107, + 689, + 464, + 720 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Count the exact number of [dot shape]s in the image. Assume the pattern of [dot shape]s continues behind any black box. Provide the total number of [dot shape]s as if the black box were not there. Only count [dot shape]s that are visible within the frame (or would be visible without the occluding box). If [dot shape]s are partially in the frame (i.e. if any part of [dot shape]s are visible), count it. 
If the [dot shape]s would be partially in the frame without the occluding box, count it.", + "bbox": [ + 107, + 728, + 465, + 878 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 924, + 506, + 936 + ], + "page_idx": 14 + } +] \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15485/a2c7802b-0ba4-4f59-a685-cb9d447cab8d_model.json b/data/2025/2504_15xxx/2504.15485/a2c7802b-0ba4-4f59-a685-cb9d447cab8d_model.json new file mode 100644 index 0000000000000000000000000000000000000000..19187a28eea7a1e720db89a00f377d6a76e4c396 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/a2c7802b-0ba4-4f59-a685-cb9d447cab8d_model.json @@ -0,0 +1,3079 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.275, + 0.061, + 0.723 + ], + "angle": 270, + "content": "arXiv:2504.15485v2 [cs.CV] 13 Aug 2025" + }, + { + "type": "title", + "bbox": [ + 0.145, + 0.13, + 0.856, + 0.177 + ], + "angle": 0, + "content": "CAPTURE: Evaluating Spatial Reasoning in Vision Language Models via Occluded Object Counting" + }, + { + "type": "text", + "bbox": [ + 0.204, + 0.204, + 0.31, + 0.222 + ], + "angle": 0, + "content": "Atin Pothiraj" + }, + { + "type": "text", + "bbox": [ + 0.35, + 0.204, + 0.507, + 0.222 + ], + "angle": 0, + "content": "Elias Stengel-Eskin" + }, + { + "type": "text", + "bbox": [ + 0.548, + 0.205, + 0.643, + 0.221 + ], + "angle": 0, + "content": "Jaemin Cho" + }, + { + "type": "text", + "bbox": [ + 0.684, + 0.205, + 0.793, + 0.22 + ], + "angle": 0, + "content": "Mohit Bansal" + }, + { + "type": "text", + "bbox": [ + 0.429, + 0.223, + 0.569, + 0.24 + ], + "angle": 0, + "content": "UNC Chapel Hill" + }, + { + "type": "text", + "bbox": [ + 0.325, + 0.242, + 0.703, + 0.257 + ], + "angle": 0, + "content": "{atin, esteng, jmincho, mbansal}@cs.unc.edu" + }, + { + "type": "title", + "bbox": [ + 0.249, + 0.292, + 0.327, + 0.308 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.324, + 
0.485, + 0.807 + ], + "angle": 0, + "content": "Recognizing and reasoning about occluded (partially or fully hidden) objects is vital to understanding visual scenes, as occlusions frequently occur in real-world environments and act as obstacles for spatial comprehension. To test models' ability to reason about multiple occluded objects, we introduce a novel task, Counting Amodally for Patterns Through Unseen REgions (CAPTURE), which requires a model to count objects arranged in a pattern by inferring how the pattern continues behind an occluder (an object which blocks parts of the scene). CAPTURE requires both recognizing visual patterns and reasoning, making it a useful testbed for evaluating vision-language models (VLMs) on whether they understand occluded patterns and possess spatial understanding skills. By requiring models to reason about occluded objects, CAPTURE also tests VLMs' ability to form world models that would allow them to fill in missing information. CAPTURE consists of two parts: (1) CAPTURE\\(^{\\text{real}}\\), with manually filtered images of real objects in patterns and (2) CAPTURE\\(^{\\text{synthetic}}\\), a controlled diagnostic with generated patterned images. We evaluate four strong VLMs (GPT-4o, Intern-VL2, Molmo, and Qwen2-VL) on CAPTURE, finding that models struggle to count on both occluded and unoccluded patterns. Crucially, we find that models perform worse with occlusion, suggesting that VLMs are also deficient in inferring unseen spatial relationships: even the strongest VLMs like GPT-4o fail to count with occlusion. In contrast, we find that humans achieve very little error on CAPTURE. 
We also find that providing auxiliary information of occluded object locations increases performance, underscoring that the model error comes both from an inability to handle occlusion as well as difficulty in counting in images.\\(^{1}\\)" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.826, + 0.222, + 0.841 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.848, + 0.483, + 0.879 + ], + "angle": 0, + "content": "Inferring what lies behind different objects in occluded scenes is crucial for human perception, as it allows us to" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.291, + 0.693, + 0.435 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.523, + 0.437, + 0.686, + 0.48 + ], + "angle": 0, + "content": "Instruction: Count the exact number of cups in the image, assuming the pattern continues behind the black box." + }, + { + "type": "image", + "bbox": [ + 0.693, + 0.291, + 0.882, + 0.484 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.512, + 0.496, + 0.907, + 0.552 + ], + "angle": 0, + "content": "Figure 1. CAPTURE example with an output from GPT-4o. While people can easily infer the missing number of cups and correctly reason over occluded patterns, models generally struggle to reason over these occluded scenes." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.568, + 0.907, + 0.825 + ], + "angle": 0, + "content": "maintain a coherent understanding of our environment even when parts are hidden. The human visual system accomplishes this by integrating past experiences, context, and sensory inputs to reconstruct incomplete scenes [19, 27, 30, 45]. Meanwhile, recent advancements in vision-language models (VLMs) – especially in terms of visual and spatial reasoning – raise the question of whether these systems can perform similar inferential tasks. 
One way of measuring such capabilities is through amodal completion – the task of inferring the invisible parts of partially occluded objects; here, vision-only models are typically evaluated via dense prediction tasks like object segmentation and image inpainting [5]. However, this format is not well-suited for assessing VLMs, whose outputs consist of text tokens rather than pixel-level predictions. This raises a critical question: How can we quantify the ability of VLMs to form spatial world modeling [17] in the presence of occlusion?" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.825, + 0.909, + 0.903 + ], + "angle": 0, + "content": "To address this, we introduce CAPTURE, Counting Amodally for Patterns Through Unseen REgions, a novel benchmark that tests a VLM's world modeling and spatial reasoning abilities through the task of amodal counting, where models are prompted to count occluded objects" + }, + { + "type": "page_footnote", + "bbox": [ + 0.109, + 0.887, + 0.422, + 0.9 + ], + "angle": 0, + "content": "\\(^{1}\\)Code and data: https://github.com/atinpothiraj/CAPTURE" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.332 + ], + "angle": 0, + "content": "by amodally completing a pattern. CAPTURE focuses on counting as it provides an objective and easy-to-verify output by comparing predicted counts with ground truth values. Moreover, patterned objects appear in various real-world domains, especially in man-made environments like parking lots, cities, and warehouses, where counting objects is often required. Fig. 1 illustrates the CAPTURE task. We show a VLM an image where objects are placed in a regular pattern (e.g., a 4x4 grid) with some objects occluded, and ask the model to count the total number of objects in the image assuming that the pattern continues behind the occlusion. 
The task requires handling occlusion, pattern recognition, and counting skills that exist in humans from a fairly young age [27, 30, 45], thus humans can easily answer such questions – indeed, we find that people can complete CAPTURE tasks with almost no error." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.336, + 0.482, + 0.518 + ], + "angle": 0, + "content": "CAPTURE consists of two subsets: \\(\\mathrm{CAPTURE}^{\\mathrm{real}}\\) and \\(\\mathrm{CAPTURE}^{\\mathrm{synthetic}}\\). As shown in Fig. 2, \\(\\mathrm{CAPTURE}^{\\mathrm{real}}\\) contains real-world images and tests the ability of models to perform amodal counting in naturalistic contexts, while \\(\\mathrm{CAPTURE}^{\\mathrm{synthetic}}\\) allows us to analyze specific factors by controlling different variables like color, shape, and number of objects. All images in \\(\\mathrm{CAPTURE}\\) contain a pattern of objects and a manually annotated occluding black box covering some objects. \\(\\mathrm{CAPTURE}^{\\mathrm{real}}\\) contains 924 images with a diverse range of settings and objects, covering 92 different object types, while \\(\\mathrm{CAPTURE}^{\\mathrm{synthetic}}\\) contains 1250 images across multiple attribute classes." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.52, + 0.483, + 0.779 + ], + "angle": 0, + "content": "By combining vision encoders with large language models (LLMs), VLMs have the potential to reason in a zero-shot way about visual inputs. To put this ability to the test and measure VLMs' ability to reason about missing visual information, we evaluate four strong recent VLMs (GPT40, InternVL2, Molmo, and Qwen2VL) on CAPTURE. Our experiment results (Sec. 4) show that models generally struggle with the multiple aspects of the task, with high error rates on both CAPTURE\\(^{\\text{real}}\\) and CAPTURE\\(^{\\text{synthetic}}\\) for occluded and unoccluded images. 
In contrast, we find that humans can perform the task easily: whereas model performance deteriorates as more objects in images are occluded, humans complete the task almost perfectly. We also compare VLMs to a vision-only model trained to count visible objects; while this model generally outperforms VLMs, its error is directly tied to the number of occluded objects – the more objects are occluded, the higher its error will be." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.781, + 0.483, + 0.902 + ], + "angle": 0, + "content": "By objectively measuring VLMs' spatial reasoning capabilities under occlusion, CAPTURE highlights an unexpected weakness in VLMs. We analyze this weakness by providing the model with additional clues and information. Specifically, we test to what degree the VLMs' failure stems from an inability to integrate visual information by providing it with a text-based representation of the visible objects in the image in the form of object coordinates; here, VLMs" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.365 + ], + "angle": 0, + "content": "perform substantially better, indicating that their poor performance on CAPTURE stems partly from an inability to count objects in images, rather than an inability to count more generally. Our findings align with previous work, which similarly finds that VLMs struggle to count in images [22, 33, 42]. We also test the degree to which VLM errors stem from an inability to form a world model by providing it with auxiliary information (the coordinates of the occluded objects in text, or inpainting the occluded regions). We find that VLMs perform substantially better with this auxiliary information, suggesting that VLMs are partly limited by their inability to imagine the missing visual information. 
Addressing these gaps is critical for VLMs to function effectively in real-world scenarios, where visual reasoning often involves occlusions – whether counting stadium seats, components on production lines, or buildings in neighborhoods. We hope that our work will foster future research on improving the world modeling capabilities of VLMs." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.378, + 0.637, + 0.393 + ], + "angle": 0, + "content": "2.CAPTURE" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.403, + 0.664, + 0.418 + ], + "angle": 0, + "content": "2.1. Task Overview" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.426, + 0.905, + 0.789 + ], + "angle": 0, + "content": "Input/output formulation. CAPTURE tests VLMs on occlusion reasoning, pattern recognition, and counting of both visible and occluded objects. VLMs already achieve high accuracy in classifying single, occluded objects [20]. Thus, we also argue that VLMs have the potential to perform well on CAPTURE's challenging task because their proficiency in handling occlusion ought to enable them to recognize occluded objects and reason accordingly. All images in CAPTURE contain a pattern. This makes the task solvable for models and people - if the objects were not placed in a pattern, it would be unreasonable to expect models to infer the position of the occluded objects. For example, given an image of a random pile of coins with a region occluded, it is not easy to infer whether the occluded region contains no coins or contains roughly the same amount as the rest of the pile. For this task, the patterns considered are all regular and fairly small, e.g. grids, circles, triangles, and other regular shapes - see Fig. 2 for further examples. The last step of CAPTURE is counting, asking the model to provide an objectively measurable output. 
In addition to VLMs, we also test COUNTGD [3], a state-of-the-art object detection-based counting method, finding that it fails to account for the occluded scenario, as its training entails solely predicting the visible, unoccluded objects in the image." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.797, + 0.905, + 0.829 + ], + "angle": 0, + "content": "Metric. We use symmetric mean percent error (sMAPE) as the primary metric. sMAPE is given by:" + }, + { + "type": "equation", + "bbox": [ + 0.597, + 0.84, + 0.905, + 0.88 + ], + "angle": 0, + "content": "\\[\n\\mathrm {s M A P E} = 1 0 0 \\cdot \\frac {1}{n} \\sum_ {i = 1} ^ {n} \\frac {\\left| y _ {i} - \\hat {y} _ {i} \\right|}{\\left| y _ {i} \\right| + \\left| \\hat {y} _ {i} \\right|} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.887, + 0.905, + 0.902 + ], + "angle": 0, + "content": "where \\(y_{i}\\) represents the actual values, \\(\\hat{y}_i\\) represents the pre" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.271, + 0.09, + 0.342, + 0.1 + ], + "angle": 0, + "content": "CAPTUREreal" + }, + { + "type": "text", + "bbox": [ + 0.101, + 0.102, + 0.501, + 0.135 + ], + "angle": 0, + "content": "Instruction: Count the exact number of [object] in the image. Assume the pattern of [object] continues behind any black box. Provide the total number of [object] as if the black box were not there." + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.137, + 0.225, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.097, + 0.217, + 0.225, + 0.272 + ], + "angle": 0, + "content": "GPT-4o: 18 bottle caps visible...black box covers...5 caps...total estimated count...is 23. 
Ground truth: 19" + }, + { + "type": "image", + "bbox": [ + 0.228, + 0.137, + 0.395, + 0.215 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.241, + 0.218, + 0.374, + 0.273 + ], + "angle": 0, + "content": "GPT-4o: Bread rolls are organized in a 4x4 grid, which suggests there are 16 rolls in total. Ground truth: 20" + }, + { + "type": "image", + "bbox": [ + 0.401, + 0.137, + 0.505, + 0.215 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.396, + 0.217, + 0.512, + 0.282 + ], + "angle": 0, + "content": "GPT-4o: Total nuts in each row...6 times 6 = 36. Nuts in the hidden \\(3 \\times 3\\) section = 9. 36 + 9 = 45 nuts. Ground truth: 42" + }, + { + "type": "title", + "bbox": [ + 0.667, + 0.089, + 0.756, + 0.1 + ], + "angle": 0, + "content": "CAPTUREsynthetic" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.102, + 0.884, + 0.136 + ], + "angle": 0, + "content": "Instruction: Your task is to count objects in the image. Assume the pattern of [object] continues behind the black box. First, state what the pattern is, then give your final count." + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.139, + 0.634, + 0.214 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.52, + 0.217, + 0.644, + 0.272 + ], + "angle": 0, + "content": "GPT-4o: Pattern: A circular arrangement... Visible count: ...8 ... Total count: ...12 dots. Ground truth: 11" + }, + { + "type": "image", + "bbox": [ + 0.644, + 0.141, + 0.775, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.646, + 0.217, + 0.777, + 0.293 + ], + "angle": 0, + "content": "GPT-4o: ...pattern consists of...a 2x2 grid... can infer...an additional four blue squares in the obscured section. 
Final count: 8 blue squares Ground truth: 6" + }, + { + "type": "image", + "bbox": [ + 0.787, + 0.14, + 0.894, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.782, + 0.217, + 0.905, + 0.293 + ], + "angle": 0, + "content": "GPT-4o: Pattern: Triangular arrangement ...Final Count:7 red dots (5 visible \\(^+\\) 2 estimated behind the black box). Ground truth:6" + }, + { + "type": "image_caption", + "bbox": [ + 0.18, + 0.304, + 0.816, + 0.32 + ], + "angle": 0, + "content": "Figure 2. Example images with GPT-4o responses to CAPTURE\\(^{\\text{real}}\\) and CAPTURE\\(^{\\text{synthetic}}\\) occluded splits." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.345, + 0.484, + 0.436 + ], + "angle": 0, + "content": "dicted values, and \\( n \\) is the number of observations. sMAPE is capped at \\( 100\\% \\), providing a fixed range. This makes sMAPE ideal for challenging tasks like ours, as we can penalize responses that fail to produce an answer with a maximum error of \\( 100\\% \\). For a justification of sMAPE over other metrics, see Appendix A.1." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.446, + 0.188, + 0.46 + ], + "angle": 0, + "content": "2.2. Dataset" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.468, + 0.483, + 0.695 + ], + "angle": 0, + "content": "CAPTURE\\(^{\\text{real}}\\). We introduce a set of real images with patterns to test amodal counting in naturalistic settings. The original images and annotations come from the FSC-147 dataset [37], a diverse counting dataset with manual annotations for the number of target objects and all object bounding boxes in each image. FSC-147 contains a diverse array of objects, with 6146 real-world images across 147 object categories. We filter FSC-147 for images that contain identifiable and regular patterns of objects and manually overlay a black box to occlude some objects, resulting in 924 images. 
Filtering is first performed with GPT-4o and then manually verified; we also manually verify that determining objects despite the occlusion is feasible. For each example, we maintain both occluded and unoccluded versions. Further details on CAPTURE\\(^{\\text{real}}\\) can be found in Appendix B." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.703, + 0.483, + 0.87 + ], + "angle": 0, + "content": "CAPTUREsynthetic. While CAPTUREreal makes CAPTURE more applicable to real-world scenarios, each image is unique, making the data less controlled and challenging to draw clear conclusions about model performance. Images without background distractors, texture variance, and other potential visual obstacles provide a more controlled version of the task. Therefore, we create CAPTUREsynthetic to examine the task in a fully controlled environment. CAPTUREsynthetic comprises 1250 images of simple objects in patterns, where different variables are held constant or changed. We vary the following features:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.871, + 0.345, + 0.885 + ], + "angle": 0, + "content": "1. Object count: varies from 5 to 15." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.886, + 0.361, + 0.901 + ], + "angle": 0, + "content": "2. Object: can be either dots or squares." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.871, + 0.361, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.342, + 0.905, + 0.453 + ], + "angle": 0, + "content": "
CAPTURErealCAPTUREsynthetic
# Images9241250
# Object Types922
Avg. Occluded Obj.13.972.73
Avg. Total Obj.61.4510.00
StrengthsDiverse Objects/SettingsConfounder-free
NaturalisticControllable Attributes
Realistic ContextUniformly Distributed
" + }, + { + "type": "table_caption", + "bbox": [ + 0.548, + 0.463, + 0.871, + 0.478 + ], + "angle": 0, + "content": "Table 1. Statistics and strengths for CAPTURE splits." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.501, + 0.905, + 0.53 + ], + "angle": 0, + "content": "3. Arrangement/shape: can be a rectangle, circle, or pyramid (where feasible based on object count)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.531, + 0.905, + 0.56 + ], + "angle": 0, + "content": "4. Location: we consider five positions on the page: center, top-left, top-right, bottom-left, or bottom-right." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.561, + 0.905, + 0.591 + ], + "angle": 0, + "content": "5. Color: we randomly choose one of 5 colors for all the objects in an image." + }, + { + "type": "list", + "bbox": [ + 0.513, + 0.501, + 0.905, + 0.591 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.595, + 0.906, + 0.641 + ], + "angle": 0, + "content": "The \\(\\mathrm{CAPTURE}^{\\mathrm{synthetic}}\\) data is split similarly to the \\(\\mathrm{CAPTURE}^{\\mathrm{real}}\\) data; each configuration has a variant with an overlaid occluding box and one without." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.663, + 0.733, + 0.68 + ], + "angle": 0, + "content": "2.3. Statistics and Examples" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.689, + 0.907, + 0.901 + ], + "angle": 0, + "content": "Fig. 2 shows examples from \\(\\mathrm{CAPTURE}^{\\mathrm{real}}\\) and \\(\\mathrm{CAPTURE}^{\\mathrm{synthetic}}\\) paired with their corresponding answers from GPT-4o and their ground truth answers. These examples show the range of objects and patterns in the dataset and highlight the task's feasibility for humans. Tab. 1 reports summary statistics for \\(\\mathrm{CAPTURE}\\), including the number of images and object types, as well as the mean number of occluded and total objects in both splits of \\(\\mathrm{CAPTURE}\\). 
The number of objects in \\(\\mathrm{CAPTURE}^{\\mathrm{real}}\\) is shown in Fig. 3, where most images have between 0 and 30 objects. On \\(\\mathrm{CAPTURE}^{\\mathrm{synthetic}}\\), the maximum number of objects is 15, and \\(\\mathrm{CAPTURE}^{\\mathrm{synthetic}}\\) images generally have 1-6 occluded objects (shown in Fig. 4, as further occlusion could make the count unresolvable)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.134, + 0.096, + 0.436, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.145, + 0.235, + 0.428, + 0.25 + ], + "angle": 0, + "content": "Figure 3. # of objects in CAPTURE\\(^{\\text{real}}\\) images." + }, + { + "type": "image", + "bbox": [ + 0.137, + 0.263, + 0.436, + 0.375 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.106, + 0.39, + 0.467, + 0.405 + ], + "angle": 0, + "content": "Figure 4. # of occluded objects in CAPTUREsynthetic images." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.419, + 0.267, + 0.435 + ], + "angle": 0, + "content": "3. Experiment Setup" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.443, + 0.184, + 0.457 + ], + "angle": 0, + "content": "3.1. Models" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.465, + 0.483, + 0.768 + ], + "angle": 0, + "content": "We experiment with GPT-4o [28], Intern-VL2-Llama3-8B [9, 10], Qwen2-VL-7B [41], MiniCPM-o 2.6 [47], and Kimi-VL-A3B [40] for their high scores on other VLM tasks [29]. We add Molmo 7B-D [13], because of its ability to \"point and count,\" giving it a potential advantage on CAPTURE. Specifically, Molmo is trained on millions of examples that directly ground text to 2D coordinates (or \"points\") in images. This allows Molmo to directly point to image coordinates and count more easily by pointing to several objects. 
All the VLMs feature a different language backbone and vision encoder to provide broad coverage of model architectures. To evaluate models, we provide the model with the name of the specific object to be counted and the explicit instruction to count fully visible objects and objects behind the occluding box (in the occluded images). For each model, we test ten prompts on a validation set of 100 images, selecting the best prompt for each model in each dataset section (CAPTURE\\(^{\\text{real}}\\)/CAPTURE\\(^{\\text{synthetic}}\\)) and for each environment (occluded/unoccluded). We provide the selected prompts in Appendix D." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.779, + 0.396, + 0.793 + ], + "angle": 0, + "content": "3.2. Answer Generation and Extraction" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.801, + 0.482, + 0.862 + ], + "angle": 0, + "content": "Given the complex nature of CAPTURE, we allow models to generate open-ended responses and then subsequently extract answers. Further details (including the maximum number of tokens) can be found in Appendix A.2." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.871, + 0.483, + 0.903 + ], + "angle": 0, + "content": "Answer extraction. Empirically, we found that constraining the output to a specific format for ease of analysis neg-" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.302 + ], + "angle": 0, + "content": "atively impacted benchmark performance. Therefore, we instead prompt models to generate freely and extract the final output number using a separate answer extractor based on Llama 3.1 8B [1]. This answer extractor takes the output from the model as input and prompts it to extract a single number representing the final answer. The answer extractor also identifies if an output failed to converge on a singular number answer and assigns a label to these examples. 
We mark such incomplete/incoherent model generations as 'skipped' questions and when calculating the error later, these responses are assigned the worst possible sMAPE score (100%). The answer extractor outputs were manually verified on 1000 outputs, and the extractor was found to be 100% accurate." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.313, + 0.905, + 0.435 + ], + "angle": 0, + "content": "Human and object detection baselines. We also report the performance of humans and a recent counting model (COUNTGD [3]) as baselines to establish a point of reference for model performance. To confirm that humans can perform the CAPTURE task, we provided 100 randomly selected occluded examples each from the CAPTURE\\(^{\\text{real}}\\) and CAPTURE\\(^{\\text{synthetic}}\\) subsets to 3 undergraduate students with no prior knowledge of the task." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.448, + 0.713, + 0.465 + ], + "angle": 0, + "content": "4. Results and Analysis" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.473, + 0.776, + 0.488 + ], + "angle": 0, + "content": "4.1. Main Results on CAPTUREreal" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.499, + 0.905, + 0.846 + ], + "angle": 0, + "content": "Models consistently struggle with counting and perform worse on occluded images. We run the VLMs on the occluded and unoccluded versions of CAPTURE to discern whether occlusion significantly impacts model performance. Tab. 2 shows that all models struggle with counting generally, performing poorly on both splits. Moreover, we see that every model performs better on the unoccluded images. On average, the models perform \\(6.28\\%\\) worse in \\(\\mathrm{CAPTURE}^{\\mathrm{real}}\\) occluded images and \\(4.85\\%\\) worse in \\(\\mathrm{CAPTURE}^{\\mathrm{synthetic}}\\) occluded images (in terms of absolute sMAPE), indicating increased difficulty from a standard counting task. 
The best model for both splits, GPT-4o, has an error rate of \\(14.75\\%\\) on \\(\\mathrm{CAPTURE}^{\\mathrm{real}}\\) and a lower error rate of \\(9.71\\%\\) on \\(\\mathrm{CAPTURE}^{\\mathrm{synthetic}}\\). Across both the real and synthetic split, GPT-4o's error increases with occlusion, by \\(1.41\\%\\) on the real data and \\(3.81\\%\\) on the synthetic split. Interestingly, despite its fine-tuning on counting tasks, Molmo exhibits a sizable error rate of \\(32.5\\%\\) on \\(\\mathrm{CAPTURE}^{\\mathrm{real}}\\) occluded images. The high error rates of VLMs indicate limited capabilities in visual understanding under occlusions, pattern recognition, and counting. We further analyze the source of these errors with oracle experiments in Sec. 4.3." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.856, + 0.905, + 0.901 + ], + "angle": 0, + "content": "Humans complete the task with almost no error. Tab. 3, evaluated on a 100-example subset of each split, confirms that humans complete the task with ease despite occlusion," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.504, + 0.936 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.256, + 0.089, + 0.74, + 0.254 + ], + "angle": 0, + "content": "
ModelError (%) [↓]
CAPTURErealCAPTUREsynthetic
Originalw/ Occlusion (Δ)Originalw/ Occlusion (Δ)
GPT-4o13.3414.75 (+1.41)5.909.71 (+3.81)
InternVL226.1732.90 (+6.73)16.4417.57 (+1.13)
Molmo25.9032.49 (+6.59)8.4017.73 (+9.33)
Qwen2VL18.9629.33 (+10.37)6.6311.74 (+5.11)
MiniCPM-o 2.623.8430.08 (+6.24)17.0619.00 (+1.94)
Kimi-VL-A3B23.4825.96 (+2.48)16.9118.07 (+1.16)
Avg. of 6 VLMs21.9527.59 (+5.64)11.8915.64 (+3.75)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.119, + 0.265, + 0.877, + 0.278 + ], + "angle": 0, + "content": "Table 2. Results across VLMs on all splits of CAPTURE, with average error for each column. Metric: sMAPE (lower is better)." + }, + { + "type": "table", + "bbox": [ + 0.111, + 0.302, + 0.462, + 0.472 + ], + "angle": 0, + "content": "
ModelError (%) [↓]
CAPTURErealCAPTUREsynthetic
(Baseline)
Human3.790.92
(VLMs)
GPT-4o14.759.71
InternVL232.9017.57
Molmo32.4917.73
Qwen2VL29.3311.74
Avg. of 4 VLMs27.3714.19
" + }, + { + "type": "table_caption", + "bbox": [ + 0.091, + 0.481, + 0.482, + 0.51 + ], + "angle": 0, + "content": "Table 3. Human baseline vs VLMs on CAPTURE\\(^{\\text{real}}\\) and CAPTURE\\(^{\\text{synthetic}}\\) (occluded split). Metric: sMAPE (lower is better)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.526, + 0.483, + 0.603 + ], + "angle": 0, + "content": "with an sMAPE of \\(3.79\\%\\) on \\(\\mathrm{CAPTURE}^{\\mathrm{real}}\\) and \\(0.92\\%\\) on \\(\\mathrm{CAPTURE}^{\\mathrm{synthetic}}\\). On the same subset of examples, models performed 7 times worse on \\(\\mathrm{CAPTURE}^{\\mathrm{real}}\\) and 14 times worse on \\(\\mathrm{CAPTURE}^{\\mathrm{synthetic}}\\) than humans, underscoring the gap between VLMs and humans in this task." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.613, + 0.483, + 0.779 + ], + "angle": 0, + "content": "Object detection-based baseline outperforms VLMs. We attempt the task with a strong object detection-based model to highlight that a standard counting approach will experience a greater loss going from unoccluded to occluded environments, as it cannot capture any occluded objects, i.e. cannot reason. We choose COUNTGD [3], the top solution for unoccluded counting on FSC-147, on which it was trained. Because we draw our images from FSC-147's train and test sets, and COUNTGD trains on FSC-147, we only evaluate COUNTGD on the subset of our data sourced from the FSC-147 test set, consisting of 149 images." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.78, + 0.483, + 0.902 + ], + "angle": 0, + "content": "We find that COUNTGD deteriorates by \\(7.19\\%\\) on occluded images, increasing from \\(3.15\\%\\) sMAPE to \\(10.34\\%\\) as observed in Fig. 5. As expected, COUNTGD outperforms all VLMs on the unoccluded split as it is trained for counting on FSC-147. COUNTGD also outperforms the VLMs on the occluded split, reinforcing that only counting the visible objects is a hard-to-beat baseline. 
However, the drop in performance with occlusion is greater than the average" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.306, + 0.9, + 0.398 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.411, + 0.907, + 0.453 + ], + "angle": 0, + "content": "Figure 5. VLM vs. VLM + CountGD hybrid on questions from the CAPTURE\\(^{\\text{real}}\\) (occluded split) that are not in COUNTGD training set. Metric: sMAPE (lower is better)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.473, + 0.906, + 0.55 + ], + "angle": 0, + "content": "VLM's drop, highlighting a disadvantage of non-reasoning solutions on CAPTURE: their error is necessarily tied directly to the number of occluded objects and they cannot address the task on their own, whereas a VLM might be able to infer missing objects via reasoning." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.558, + 0.907, + 0.71 + ], + "angle": 0, + "content": "Hybrid VLM counting systems improve performance. Finding that COUNTGD is far better at counting visible objects than VLMs, we leverage the advantage that COUNTGD has by feeding its visible object count information to the VLMs as part of the prompt. As expected, Fig. 5 illustrates that there is a considerable decrease in error when CountGD and the VLMs are combined. However, this hybrid system still performs worse than COUNTGD alone, indicating VLMs are still subpar even at counting just occluded objects (as further reinforced by Appendix C.3)." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.719, + 0.892, + 0.733 + ], + "angle": 0, + "content": "4.2. Effect of Data Factors on VLM Performance" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.74, + 0.906, + 0.862 + ], + "angle": 0, + "content": "Here, we use the CAPTURE\\(^{\\text{synthetic}}\\) data (which can be controlled precisely and fully annotated) to examine which features correlate with model performance. 
We test the effect of the following variables on final performance: (1) Increasing the number of occluded objects; (2) Varying the pattern. We also investigate whether models can classify patterns, and to what degree models can predict the number of occluded objects only (rather than the total)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.871, + 0.906, + 0.901 + ], + "angle": 0, + "content": "Models perform worse when more dots are occluded. In Fig. 6 (right), we observe that error increases with re" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.504, + 0.936 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.179, + 0.092, + 0.51, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.514, + 0.092, + 0.815, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.241, + 0.908, + 0.27 + ], + "angle": 0, + "content": "Figure 6. Effect of number of total objects in the image and number of occluded objects on sMAPE from \\(\\mathrm{CAPTURE}^{\\mathrm{synthetic}}\\) (occluded split). Metric: sMAPE (lower is better)." + }, + { + "type": "image", + "bbox": [ + 0.113, + 0.291, + 0.456, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.457, + 0.483, + 0.486 + ], + "angle": 0, + "content": "Figure 7. Effect of pattern type in CAPTURE\\(^{\\text{synthetic}}\\) (occluded split) on sMAPE. Metric: sMAPE (lower is better)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.503, + 0.484, + 0.625 + ], + "angle": 0, + "content": "spect to the number of occluded dots. However, Fig. 6 (left) also shows that performance is less affected by the total number of dots. This suggests that the task difficulty is more closely correlated with the difficulty of occlusion – i.e. the difficulty of the world modeling task – rather than the complexity of the pattern. 
Some models, such as GPT-4o, deviate from this trend, which has lower error on specific numbers. We further explore model bias in Appendix C.5." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.635, + 0.484, + 0.726 + ], + "angle": 0, + "content": "Performance depends on pattern type. The controllability of \\(\\mathrm{CAPTURE}^{\\mathrm{synthetic}}\\) allows us to measure the effect of pattern type on performance. In Fig. 7, we find that model performance differs across shapes with some regularity: objects arranged in a circle generally have lower sMAPE than other shapes, across all models. Qwen2VL has an espe" + }, + { + "type": "table", + "bbox": [ + 0.132, + 0.741, + 0.441, + 0.864 + ], + "angle": 0, + "content": "
ModelAccuracy (%) [↑]
Originalw/ Occlusion (Δ)
GPT-4o84.0078.52 (-5.48)
InternVL268.5247.48 (-21.04)
Molmo80.7065.22 (-15.48)
Qwen2VL88.3586.43 (-1.92)
Avg. of 4 VLMs80.3969.41 (-10.98)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.875, + 0.483, + 0.903 + ], + "angle": 0, + "content": "Table 4. VLM accuracy in identifying the correct pattern in CAPTUREsynthetic. Metric: accuracy (higher is better)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.29, + 0.905, + 0.321 + ], + "angle": 0, + "content": "cially large decrease in error when given circular arrangements compared to rectangles or triangles." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.33, + 0.907, + 0.556 + ], + "angle": 0, + "content": "Models can identify patterns. To determine how much model errors can be attributed to a lack of pattern recognition ability, we formulate a separate task where models must recognize the pattern in the image on CAPTURE\\(^{\\text{synthetic}}\\). Here, we frame the task as multiple-choice, asking the model to select from the pattern types available (rectangle, triangle, or circle). Table 4 illustrates that all perform substantially better than random at this task, with most models except InternVL2 achieving accuracy above \\(80\\%\\) in the unoccluded setting. As expected, the patterns were easier to identify in unoccluded scenarios, with models suffering an average accuracy drop of \\(10.95\\%\\) in the occluded setting. Notably, GPT-4o and Qwen2VL have a fairly small drop in performance, suggesting they can generally capture the pattern even in the presence of occlusion." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.567, + 0.828, + 0.583 + ], + "angle": 0, + "content": "4.3. Analysis with Auxiliary Information" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.589, + 0.907, + 0.831 + ], + "angle": 0, + "content": "In Sec. 4.1, we see that models broadly struggle with amodal counting. Here, we seek to disentangle whether this problem results from a failure to reason, the absence of a world model, or both by giving VLMs two different types of auxiliary information: oracle information and predicted information. 
Oracle information is ground truth and is directly pulled from CAPTURE's metadata, e.g., object locations. Predicted information generates new information from a completely separate model and gives it to the VLM. This information is not ground truth and is sourced from an external model, such as an image inpainting model, rather than the VLM. By giving the model auxiliary information in the form of reasoning and spatial clues, we can establish how much of each model's error results from an inability to handle occlusion rather than an inability to recognize and count visible objects." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.84, + 0.909, + 0.901 + ], + "angle": 0, + "content": "Oracle setup. We test two oracles for \\(\\mathsf{CAPTURE}^{\\mathsf{real}}\\)'s occluded split based on its constituent subtasks: counting the visible objects and inferring/counting occluded objects. Both oracles provide the VLM with text-based coordinates" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.926, + 0.504, + 0.936 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.152, + 0.094, + 0.242, + 0.104 + ], + "angle": 0, + "content": "With Occlusion" + }, + { + "type": "image", + "bbox": [ + 0.099, + 0.106, + 0.297, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.1, + 0.2, + 0.291, + 0.265 + ], + "angle": 0, + "content": "With Occlusion Prompt: Count the exact number of cans in the image. Assume the pattern of cans continues behind any black box. Provide the total number of cans as if the black box were not there." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.318, + 0.094, + 0.481, + 0.105 + ], + "angle": 0, + "content": "All Object Coordinate Oracle" + }, + { + "type": "image", + "bbox": [ + 0.303, + 0.106, + 0.498, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.303, + 0.2, + 0.497, + 0.299 + ], + "angle": 0, + "content": "(w/Oracle information) \nAll Object Coordinate Oracle Prompt: Count the exact number of cans in the image, including behind the black box... Coordinates of all cans:59,43219,38356,43 522,3663,18073,335214 186),379,184),524,177220 332372,329525,325" + }, + { + "type": "image_caption", + "bbox": [ + 0.507, + 0.094, + 0.694, + 0.105 + ], + "angle": 0, + "content": "Visible Object Coordinate Oracle" + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.106, + 0.7, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.2, + 0.684, + 0.288 + ], + "angle": 0, + "content": "(w/Oracle information) \nVisible Object Coordinate \nOracle Prompt: Count the exact number of cans in the image, including behind the black box... \nCoordinates of visible cans: (59, 43), (219, 38), (356, 43), (522, 36), (63, 180), (73, 335)" + }, + { + "type": "image_caption", + "bbox": [ + 0.748, + 0.095, + 0.856, + 0.105 + ], + "angle": 0, + "content": "Inpainting Pipeline" + }, + { + "type": "image", + "bbox": [ + 0.704, + 0.106, + 0.901, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.713, + 0.2, + 0.884, + 0.243 + ], + "angle": 0, + "content": "(w/ Predicted information) \nInpainting Pipeline Prompt: \nCount the exact number of cans in the image." + }, + { + "type": "image_caption", + "bbox": [ + 0.712, + 0.254, + 0.89, + 0.288 + ], + "angle": 0, + "content": "(Fading added only for emphasis to visualize infilling. 
Final image given to VLM is not faded)" + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.305, + 0.907, + 0.361 + ], + "angle": 0, + "content": "Figure 8. Example image and text inputs for experiments with auxiliary information experiments (Sec. 4.3). Blue eyes indicate objects for which the All Object Coordinate Oracle or Visible Object Coordinate Oracle extracts coordinates. The brighter part of the image represents the area which Inpainting Pipeline fills in. Example prompts are shown in italics. Blue eye overlays and faded parts of images are for demonstration purposes and are not passed with the image." + }, + { + "type": "table", + "bbox": [ + 0.162, + 0.375, + 0.835, + 0.482 + ], + "angle": 0, + "content": "
ModelOriginalw/ OcclusionOracle InformationPredicted Information
+ All Coordinates (Δ)+ Visible (Δ)+ Inpainting (Δ)
GPT-4o13.3414.752.93 (-11.82)9.20 (-5.55)15.89 (+1.14)
InternVL226.1732.9017.48 (-15.42)25.13 (-7.77)31.12 (-1.78)
Qwen2VL18.9629.339.62 (-19.71)17.70 (-11.63)22.64 (-6.69)
Avg. of 3 VLMs19.4925.6610.01 (-15.65)17.34 (-8.32)23.22 (-2.44)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.101, + 0.493, + 0.893, + 0.507 + ], + "angle": 0, + "content": "Table 5. Effect of auxiliary information on occluded CAPTURE \\( {}^{\\text{real. }}\\Delta = \\) (Auxiliary Information) - (w/ Occlusion). Metric: sMAPE." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.534, + 0.486, + 0.791 + ], + "angle": 0, + "content": "of objects in the image, simplifying the visual task by assuming the VLM effectively has a perfect visual system that can recognize and localize objects in the image. The first oracle, the Visible Object Coordinate Oracle, gives the VLM the coordinates of all unoccluded objects (encoded as text, as seen in Fig. 8) and instructs the model to estimate the number of occluded objects, count the number of visible object coordinates, and add the two. In other words, the model is given oracle information about what objects are visible, thus also revealing key information about the pattern. The second oracle, the All Object Coordinate Oracle, instead gives the model the coordinates of all objects. Here, the model only needs to count the coordinates in the prompt, eliminating the need to reason on the visual input. Note that Molmo is excluded in these tests because it contains a prompt limit that would truncate the list of coordinates. An example of the oracle inputs can be seen in Fig. 8." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.81, + 0.485, + 0.902 + ], + "angle": 0, + "content": "Prediction setup. In this setting, we provide the VLM with an external world model representation predicted by another model. Specifically, we develop the Inpainting Pipeline to fill in the occluded region via a diffusion-based inpainting model and pass the inpainted image to the VLMs. 
For the inpainting model, we choose FLUX.1-Fill [dev]," + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.534, + 0.907, + 0.581 + ], + "angle": 0, + "content": "whose backbone FLUX.1 [dev] [21] is a top public model in the Text to Image Model Arena [7]. An example input to the VLM can be seen on the far-right of Fig. 8." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.599, + 0.909, + 0.903 + ], + "angle": 0, + "content": "Providing visible or all object coordinates improves performance substantially. The results in Tab. 2 indicate that models struggle on CAPTURE, which requires identifying a pattern and counting both visible and occluded objects. Moreover, models generally struggle with counting even in unoccluded settings. Both oracles simplify the counting task: All Object Coordinate Oracle reduces the task to simply counting coordinates with no reasoning involved, and Visible Object Coordinate Oracle similarly simplifies the task for visible objects, while still requiring inferring occluded objects. Additionally, under Visible Object Coordinate Oracle, recognizing the pattern shifts from a visual reasoning task to an augmented math problem. Instead of visually reasoning about where objects are located, the VLM considers what patterns the coordinates could make. Translating this task into a text problem results in an average increase of \\(15\\%\\) with all objects coordinate oracle; the errors LLMs make here are due to an inability to count in the text prompt, as opposed to weaknesses in handling occlusion (since all object coordinates are given), and the strongest" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.936 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.094, + 0.092, + 0.481, + 0.211 + ], + "angle": 0, + "content": "model, GPT-4o, achieves minimal error here. We also obtain an average increase of \\(8\\%\\) with the visible objects coordinate oracle (shown in Tab. 
5), possibly because it allows the more powerful LLM backbone (which is far larger than the vision model in all models tested) to complete the counting task. Taken together, these results suggest that there is much room for improvement in visual world modeling beyond text-based reasoning of VLMs." + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.224, + 0.482, + 0.464 + ], + "angle": 0, + "content": "Providing diffusion-based inpainting improves performance marginally. Similar to the object coordinate oracles, the Inpainting Pipeline (rightmost columns in Fig. 8 and Tab. 5) eliminates the need for world modeling and provides VLMs with an approximation of the image behind the occluder. With the inpainted images, VLM error decreases by almost \\(2\\%\\) for InternVL2 and \\(7\\%\\) for Qwen2VL compared to the original occluded images. GPT-4o's error increases on inpainted images by a small margin; we hypothesize that this may be because GPT-4o has one of the better world models (based on its superior performance), and thus does not improve further with the inpainted images. Moreover, every VLM still falls short of its unoccluded image performance, indicating that the diffusion model is not a perfect world model. Qualitatively, we find that the inpainting model sometimes fails to output the correct pattern." + }, + { + "type": "title", + "bbox": [ + 0.095, + 0.484, + 0.23, + 0.497 + ], + "angle": 0, + "content": "5. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.513, + 0.482, + 0.724 + ], + "angle": 0, + "content": "Spatial reasoning in visual question answering. Past work measures the spatial reasoning capabilities of VLMs in the form of visual question answering (VQA) [4, 16] benchmarks. SpartQA [26] asks VLMs to identify the spatial relation (e.g., above, behind, left of) between objects in synthetically created 2D images from NLVR [39]. More recent benchmarks test similar spatial relation understanding with real images [2, 24, 36]. 
While this past work asks models to provide a text description for a relation between two fully observed objects, CAPTURE measures the world modeling from a partially observed scene, thus requiring the handling of occlusion, pattern recognition, and counting. Together, these constitute a stricter test of spatial reasoning than typical VQA settings." + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.736, + 0.482, + 0.9 + ], + "angle": 0, + "content": "Amodal completion. Occlusions are common in natural scenes, and vision solutions for amodal completion have made significant progress in infilling occlusions [6, 38, 46]. The amodal completion task has evolved from simply completing a shape to filling in appearance (e.g., texture, color, etc.) to finally dealing with fine-grained order perception (multiple stacked occluded objects) [5]. Specifically in Qiu and Di [34], VLMs classify the hidden objects and extract fine details from occluded items. CAPTURE, however, presents a unique category of patterned amodal counting which requires inferring fully occluded objects based on a" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.092, + 0.905, + 0.212 + ], + "angle": 0, + "content": "pattern rather than inferring occluded object wholes based on object parts. In other words, previous work has only attempted tasks that require amodal completion for one object at a time [31, 38, 46], whereas CAPTURE handles multiple objects. Multi-object amodal completion is crucial because in cluttered scenes, entire groups of objects are often occluded. Moreover, the output space of CAPTURE is language (rather than filling pixels)." + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.222, + 0.905, + 0.462 + ], + "angle": 0, + "content": "Counting with vision-and-language models. Within the task of counting, the most similar application to CAPTURE is dense counting, where the objects to be counted occlude each other. 
There are many practical applications of such a task, like counting cells on a crowded slide [8], determining crop yields from densely-packed fields [43], or crowd counting [14, 44, 48]. Liang et al. [23] improved crowd counting with an augmented CLIP [35], i.e. also using VLMs for counting. Additionally, Jenkins et al. [18] introduced an amodal counting benchmark, presenting an occluded 3D counting task where models must count objects on retail shelves. However, our work differs in many ways, as Jenkins et al. [18] only counts retail shelves and uses Li-DAR input. More broadly, dense counting focuses on overlapping objects rather than on counting objects arranged into patterns, which is the focus of CAPTURE." + }, + { + "type": "title", + "bbox": [ + 0.517, + 0.477, + 0.631, + 0.491 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.501, + 0.905, + 0.727 + ], + "angle": 0, + "content": "We introduced CAPTURE, a novel benchmark for amodal counting that measures spatial reasoning capabilities under occlusion. CAPTURE is designed to assess VLMs' ability to form a robust world model and use that model for visual reasoning skills under occlusion. By testing counting, we cast the problem as a measurable task with an objective correct answer that also has real-world utility as VLMs become more broadly adopted. Our results suggest that VLMs struggle to combine reasoning, counting, and world modeling with low performance on occluded and unoccluded images. Our analysis indicates that models improve with oracle information about visible objects (simplifying the reasoning/counting tasks) and predicted information about the occluded objects (also simplifying world modeling), pointing to directions of model improvement." 
+ }, + { + "type": "title", + "bbox": [ + 0.517, + 0.741, + 0.671, + 0.756 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.765, + 0.905, + 0.9 + ], + "angle": 0, + "content": "This work was supported by DARPA ECOLE Program No. HR00112390060, NSF-CAREER Award 1846185, NSF-AI Engage Institute DRL-2112635, DARPA Machine Commonsense (MCS) Grant N66001-19-2-4031, ARO Award W911NF2110220, ONR Grant N00014-23-1-2356, Microsoft Accelerate Foundation Models Research (AFMR) grant program, and a Bloomberg Data Science PhD Fellowship. The views contained in this article are those of the authors and not of the funding agency." + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.926, + 0.504, + 0.936 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.093, + 0.09, + 0.188, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.115, + 0.482, + 0.141 + ], + "angle": 0, + "content": "[1] AI@Meta. Llama 3.1 model card. *Github Model Card*, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.144, + 0.483, + 0.212 + ], + "angle": 0, + "content": "[2] Haider Al-Tahan, Quentin Garrido, Randall Balestriero, Diane Bouchacourt, Caner Hazirbas, and Mark Ibrahim. Unibench: Visual reasoning requires rethinking vision-language beyond scaling. arXiv preprint arXiv:2408.04810, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.215, + 0.483, + 0.255 + ], + "angle": 0, + "content": "[3] Niki Amini-Naeni, Tengda Han, and Andrew Zisserman. Countgd: Multi-modal open-world counting. arXiv preprint arXiv:2407.04619, 2024. 2, 4, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.257, + 0.483, + 0.324 + ], + "angle": 0, + "content": "[4] Stanislaw Antol, Aishwarya Agrawal, Jiasen Lu, Margaret Mitchell, Dhruv Batra, C Lawrence Zitnick, and Devi Parikh. Vqa: Visual question answering. 
In Proceedings of the IEEE international conference on computer vision, pages 2425-2433, 2015. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.327, + 0.483, + 0.367 + ], + "angle": 0, + "content": "[5] Jiayang Ao, Qiuhong Ke, and Krista A Ehinger. Image amodal completion: A survey. Computer Vision and Image Understanding, 229:103661, 2023. 1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.369, + 0.483, + 0.41 + ], + "angle": 0, + "content": "[6] Jiayang Ao, Yanbei Jiang, Qiuhong Ke, and Krista A Ehinger. Open-world amodal appearance completion. arXiv preprint arXiv:2411.13019, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.412, + 0.483, + 0.438 + ], + "angle": 0, + "content": "[7] Artificial Analysis. Text to image model arena, 2025. Accessed: April 10, 2025. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.44, + 0.483, + 0.495 + ], + "angle": 0, + "content": "[8] Soumen Bera. Partially occluded object detection and counting. In Proceedings of the 2015 Third International Conference on Computer, Communication, Control and Information Technology (C3IT), pages 1-6. IEEE, 2015. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.497, + 0.483, + 0.577 + ], + "angle": 0, + "content": "[9] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, Bin Li, Ping Luo, Tong Lu, Yu Qiao, and Jifeng Dai. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. arXiv preprint arXiv:2312.14238, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.58, + 0.483, + 0.648 + ], + "angle": 0, + "content": "[10] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.65, + 0.483, + 0.704 + ], + "angle": 0, + "content": "[11] Davide Chicco, Matthijs J Warrens, and Giuseppe Jurman. The coefficient of determination r-squared is more informative than smape, mae, mape,mse and rmse in regression analysis evaluation. Peerj computer science, 7:e623, 2021. 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.706, + 0.483, + 0.732 + ], + "angle": 0, + "content": "[12] Nikolas Coupland. How frequent are numbers? Language & Communication, 31(1):27-37, 2011. 13" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.734, + 0.483, + 0.815 + ], + "angle": 0, + "content": "[13] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, et al. Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv preprint arXiv:2409.17146, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.818, + 0.483, + 0.871 + ], + "angle": 0, + "content": "[14] Zheyi Fan, Zihao Song, Di Wu, and Yixuan Zhu. Multibranch segmentation-guided attention network for crowd counting. Journal of Visual Communication and Image Representation, 97:103964, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.874, + 0.483, + 0.901 + ], + "angle": 0, + "content": "[15] Benito E Flores. A pragmatic view of accuracy measurement in forecasting. Omega, 14(2):93-98, 1986. 11" + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.115, + 0.483, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.093, + 0.906, + 0.162 + ], + "angle": 0, + "content": "[16] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Bartra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6904-6913, 2017. 
8" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.164, + 0.906, + 0.205 + ], + "angle": 0, + "content": "[17] David Ha and Jürgen Schmidhuber. Recurrent world models facilitate policy evolution. Advances in neural information processing systems, 31, 2018. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.207, + 0.906, + 0.288 + ], + "angle": 0, + "content": "[18] Porter Jenkins, Kyle Armstrong, Stephen Nelson, Siddhesh Gotad, J Stockton Jenkins, Wade Wilkey, and Tanner Watts. Countnet3d: A 3d computer vision approach to infer counts of occluded objects. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3008-3017, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.291, + 0.906, + 0.329 + ], + "angle": 0, + "content": "[19] Gaetano Kanizsa, Paolo Legrenzi, and Paolo Bozzi. Organization in vision: essays on gestalt perception. Praeger, 1979. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.334, + 0.906, + 0.387 + ], + "angle": 0, + "content": "[20] Kaleb Kassaw, Francesco Luzi, Leslie M Collins, and Jordan M Malof. Are deep learning models robust to partial object occlusion in visual recognition tasks? arXiv preprint arXiv:2409.10775, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.391, + 0.905, + 0.417 + ], + "angle": 0, + "content": "[21] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024.7" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.42, + 0.906, + 0.488 + ], + "angle": 0, + "content": "[22] Baiqi Li, Zhiqiu Lin, Wenxuan Peng, Jean de Dieu Nyandwi, Daniel Jiang, Zixian Ma, Simran Khanuja, Ranjay Krishna, Graham Neubig, and Deva Ramanan. Naturalbench: Evaluating vision-language models on natural adversarial samples. arXiv preprint arXiv:2410.14669, 2024. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.49, + 0.908, + 0.559 + ], + "angle": 0, + "content": "[23] Dingkang Liang, Jiahao Xie, Zhikang Zou, Xiaqing Ye, Wei Xu, and Xiang Bai. Crowdclip: Unsupervised crowd counting via vision-language model. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2893-2903, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.562, + 0.906, + 0.602 + ], + "angle": 0, + "content": "[24] Fangyu Liu, Guy Edward Toh Emerson, and Nigel Collier. Visual spatial reasoning. Transactions of the Association for Computational Linguistics, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.604, + 0.906, + 0.657 + ], + "angle": 0, + "content": "[25] Baraka Jacob Maiseli. Optimum design of chamfer masks using symmetric mean absolute percentage error. EURASIP Journal on Image and Video Processing, 2019(1):74, 2019. 11" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.661, + 0.906, + 0.729 + ], + "angle": 0, + "content": "[26] Roshanak Mirzaee and Hossein Rajaby. Spartqa: A textual question answering benchmark for spatial reasoning. In The 2021 Annual Conference of the North American Chapter of the Association for Computational Linguistics (NAACL-2021), 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.731, + 0.906, + 0.785 + ], + "angle": 0, + "content": "[27] Ingrid R Olson, J Christopher Gatenby, Hoi-Chung Leung, Pawel Skudlarski, and John C Gore. Neuronal representation of occluded objects in the human brain. Neuropsychologia, 42(1):95-104, 2004. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.788, + 0.736, + 0.802 + ], + "angle": 0, + "content": "[28] OpenAI. Hello gpt-4o, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.804, + 0.906, + 0.843 + ], + "angle": 0, + "content": "[29] OpenCompass Team. Openvlm leaderboard. https://huggingface.co/spaces/opencompass/open_vlmleaderboard, 2024. Accessed: 2024-11-13. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.846, + 0.906, + 0.9 + ], + "angle": 0, + "content": "[30] Yumiko OTSUKA, So KANAZAWA, and Masami K YAMAGUCHI. Development of modal and amodal completion in infants. Perception (London. Print), 35(9):1251-1264, 2006. 1, 2" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.908, + 0.9 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.925, + 0.505, + 0.937 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.093, + 0.482, + 0.176 + ], + "angle": 0, + "content": "[31] Ege Ozguroglu, Ruoshi Liu, Dídac Surís, Dian Chen, Achal Dave, Pavel Tokmakov, and Carl Vondrick. pix2gestalt: Amodal segmentation by synthesizing wholes. In 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3931-3940. IEEE Computer Society, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.177, + 0.482, + 0.232 + ], + "angle": 0, + "content": "[32] Max Peeperkorn, Tom Kouwenhoven, Dan Brown, and Anna Jordanous. Is temperature the creativity parameter of large language models? arXiv preprint arXiv:2405.00492, 2024. 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.233, + 0.482, + 0.287 + ], + "angle": 0, + "content": "[33] Muhammad Fetrat Qharabagh, Mohammadreza Ghofrani, and Kimon Fountoulakis. Lvlm-count: Enhancing the counting ability of large vision-language models. arXiv preprint arXiv:2412.00686, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.289, + 0.482, + 0.33 + ], + "angle": 0, + "content": "[34] Wenmo Qiu and Xinhan Di. Occ-mlm: Empowering multimodal large language model for the understanding of occluded objects. arXiv preprint arXiv:2410.01261, 2024. 
8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.331, + 0.482, + 0.413 + ], + "angle": 0, + "content": "[35] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.415, + 0.482, + 0.455 + ], + "angle": 0, + "content": "[36] Navid Rajabi and Jana Kosecka. Gsr-bench: A benchmark for grounded spatial reasoning evaluation via multimodal llms. arXiv preprint arXiv:2406.13246, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.457, + 0.482, + 0.512 + ], + "angle": 0, + "content": "[37] Viresh Ranjan, Udbhav Sharma, Thu Nguyen, and Minh Hoai. Learning to count everything. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3394-3403, 2021. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.513, + 0.482, + 0.58 + ], + "angle": 0, + "content": "[38] Kaziwa Saleh, Sándor Szenási, and Zoltán Vámossy. Mask guided gated convolution for amodal content completion. In 2024 IEEE 22nd Jubilee International Symposium on Intelligent Systems and Informatics (SISY), pages 000321-000326. IEEE, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.582, + 0.482, + 0.664 + ], + "angle": 0, + "content": "[39] Alane Suhr, Mike Lewis, James Yeh, and Yoav Artzi. A corpus of natural language for visual reasoning. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 217-223, Vancouver, Canada, 2017. Association for Computational Linguistics. 
8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.666, + 0.482, + 0.901 + ], + "angle": 0, + "content": "[40] Kimi Team, Angang Du, Bohong Yin, Bowei Xing, Bowen Qu, Bowen Wang, Cheng Chen, Chenlin Zhang, Chenzhuang Du, Chu Wei, Congcong Wang, Dehao Zhang, Dikang Du, Dongliang Wang, Enming Yuan, Enzhe Lu, Fang Li, Flood Sung, Guangda Wei, Guokun Lai, Han Zhu, Hao Ding, Hao Hu, Hao Yang, Hao Zhang, Haoning Wu, Haotian Yao, Haoyu Lu, Heng Wang, Hongcheng Gao, Huabin Zheng, Jiaming Li, Jianlin Su, Jianzhou Wang, Jiaqi Deng, Jiezhong Qiu, Jin Xie, Jinhong Wang, Jingyuan Liu, Junjie Yan, Kun Ouyang, Liang Chen, Lin Sui, Longhui Yu, Mengfan Dong, Mengnan Dong, Nuo Xu, Pengyu Cheng, Qizheng Gu, Runjie Zhou, Shaowei Liu, Sihan Cao, Tao Yu, Tianhui Song, Tongtong Bai, Wei Song, Weiran He, Weixiao Huang, Weixin Xu, Xiaokun Yuan, Xingcheng Yao, Xingzhe Wu, Xinxing Zu, Xinyu Zhou, Xinyuan Wang, Y. Charles, Yan Zhong, Yang Li, Yangyang Hu, Yanru Chen, Yejie Wang, Yibo Liu, Yibo Miao, Yidao Qin, Yimin Chen" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.093, + 0.482, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.905, + 0.162 + ], + "angle": 0, + "content": "Yiping Bao, Yiqin Wang, Yongsheng Kang, Yuanxin Liu, Yulun Du, Yuxin Wu, Yuzhi Wang, Yuzi Yan, Zaida Zhou, Zhaowei Li, Zhejun Jiang, Zheng Zhang, Zhilin Yang, Zhiqi Huang, Zihao Huang, Zijia Zhao, and Ziwei Chen. Kimi-VL technical report, 2025. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.164, + 0.905, + 0.259 + ], + "angle": 0, + "content": "[41] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 
4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.262, + 0.905, + 0.316 + ], + "angle": 0, + "content": "[42] Wei-Yao Wang, Zhao Wang, Helen Suzuki, and Yoshiyuki Kobayashi. Seeing is understanding: Unlocking causal attention into modality-mutual attention for multimodal llms. arXiv preprint arXiv:2503.02597, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.318, + 0.905, + 0.358 + ], + "angle": 0, + "content": "[43] Yiding Wang, Yuxin Qin, and Jiali Cui. Occlusion robust wheat ear counting algorithm based on deep learning. Frontiers in Plant Science, 12:645899, 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.361, + 0.905, + 0.415 + ], + "angle": 0, + "content": "[44] Yongjie Wang, Feng Wang, and Dongyang Huang. Dual-branch counting method for dense crowd based on self-attention mechanism. Expert Systems with Applications, 236:121272, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.418, + 0.905, + 0.445 + ], + "angle": 0, + "content": "[45] Karen Wynn. Children's understanding of counting. Cognition, 36(2):155-193, 1990. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.447, + 0.905, + 0.502 + ], + "angle": 0, + "content": "[46] Katherine Xu, Lingzhi Zhang, and Jianbo Shi. Amodal completion via progressive mixed context diffusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9099-9109, 2024. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.504, + 0.905, + 0.559 + ], + "angle": 0, + "content": "[47] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, et al. Minicpm-v: A gpt-4v level mllm on your phone. arXiv preprint arXiv:2408.01800, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.56, + 0.905, + 0.614 + ], + "angle": 0, + "content": "[48] Lifang Zhou, Songlin Rao, Weisheng Li, Bo Hu, and Bo Sun. Multi-branch progressive embedding network for crowd counting. 
Image and Vision Computing, page 105140, 2024. 8" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.614 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.092, + 0.091, + 0.18, + 0.108 + ], + "angle": 0, + "content": "Appendix" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.117, + 0.317, + 0.134 + ], + "angle": 0, + "content": "A. Implementation Details" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.142, + 0.242, + 0.158 + ], + "angle": 0, + "content": "A.1. Metric Details" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.164, + 0.485, + 0.468 + ], + "angle": 0, + "content": "We use symmetric mean percent error (sMAPE) as the primary metric for our benchmarks due to its resistance to bias for under/over predictions and small/large ground truths [25]. The standard metric for a counting benchmark is mean average error (MAE). MAE is popular, but heavily penalizes predictions that deviate by a small margin from big ground truths, highlighting the necessity for a metric that gives equal weighting to all questions. Mean average percent error (MAPE) initially seems appealing but is disproportionately inflated for small ground truths and is biased towards overpredictions. Mean square error (MSE) and root mean square error (RMSE) are also commonly used but are very sensitive to outliers because they square the error. Intuitively, performing well on almost all questions and poorly on a small subset should score better than consistently being wrong. Among commonly-used metrics, sMAPE is the only metric that evaluates performance in relation to the distribution of ground truth elements [11]. There are two common definitions [15] for sMAPE, but we use the one that scales to \\(100\\%\\). 
sMAPE is given by:" + }, + { + "type": "equation", + "bbox": [ + 0.172, + 0.48, + 0.483, + 0.518 + ], + "angle": 0, + "content": "\\[\n\\mathrm {s M A P E} = 1 0 0 \\cdot \\frac {1}{n} \\sum_ {i = 1} ^ {n} \\frac {\\left| y _ {i} - \\hat {y} _ {i} \\right|}{\\left| y _ {i} \\right| + \\left| \\hat {y} _ {i} \\right|} \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.526, + 0.484, + 0.603 + ], + "angle": 0, + "content": "where \\( y_{i} \\) represents the actual values, \\( \\hat{y}_i \\) represents the predicted values, and \\( n \\) is the number of observations. sMAPE is capped at \\( 100\\% \\), providing a finite scoring range. This feature is ideal for challenging tasks like ours, as it penalizes model responses that fail to produce an answer." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.612, + 0.248, + 0.629 + ], + "angle": 0, + "content": "A.2. Output Tokens" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.634, + 0.484, + 0.725 + ], + "angle": 0, + "content": "To maximize the VLM's chance at success, we allocate a high number of output tokens to generate a rationale and output. This varies per model. We give 4000 tokens to InternVL2, 2000 tokens to Molmo, and 8192 tokens to Qwen2VL, following their max output lengths. For GPT-40, we use the default of 4096 tokens." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.739, + 0.427, + 0.756 + ], + "angle": 0, + "content": "B. CAPTURE Dataset Creation Details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.765, + 0.483, + 0.903 + ], + "angle": 0, + "content": "The following expands upon Sec. 2.2. While FSC-147, a diverse counting dataset with manual annotations, is a strong starting point, it cannot immediately be adapted to our task. To make the task of amodal counting solvable, our dataset requires images with patterns in them. A person (or model) can infer how the pattern would continue and thus accurately predict the total number. 
For questions to be answerable, the dataset's images must be filtered down to represent patterns a model or person could recognize." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.228 + ], + "angle": 0, + "content": "Our filtering process follows two stages. First, we prompt GPT-4o to determine whether the objects were arranged in a pattern. Second, if the model responded with \"no\", the images were immediately discarded. If the model output was \"yes\", the log probability of the token is stored. Empirically, we found that higher log probability values (i.e. higher confidence scores) corresponded to more well-defined patterns in the image. Thus, we use the log probabilities for filtering." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.235, + 0.907, + 0.433 + ], + "angle": 0, + "content": "Specifically, let \\( P_{\\mathrm{yes}} \\) be the log probability of the \"yes\" token and \\( T \\) denote the threshold for determining how well-defined a pattern is. To filter the images based on pattern rigidity, we apply the following condition: \\( e^{P_{\\mathrm{yes}}} \\geq T \\). This inequality yields 991 images from the original dataset (16.12%). Next, we manually filter each of the selected images to ensure that they indeed contain patterns and feature a countable number of objects, excluding 34 images. Afterward, we manually place a \"fair\" occluding box in each image, i.e. a box that leaves sufficient portions of the pattern visible, such that the pattern can still be inferred from the unoccluded portions of the image. Occluding boxes were also chosen with varying positions and sizes in the image." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.468, + 0.71, + 0.485 + ], + "angle": 0, + "content": "C. 
Additional Analysis" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.501, + 0.906, + 0.624 + ], + "angle": 0, + "content": "Here we provide additional experiments that attempt to either increase model performance on CAPTURE or dissect the reasons behind poor model performance. Chain-of-Thought inhibits model performance, while temperature backoff slightly improves performance. Additionally, we find that models struggle at counting just occluded objects, are overconfident in occluded settings, and are biased to predict specific numbers." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.654, + 0.907, + 0.672 + ], + "angle": 0, + "content": "C.1. Chain-of-Thought reduces model performance" + }, + { + "type": "table", + "bbox": [ + 0.555, + 0.703, + 0.864, + 0.779 + ], + "angle": 0, + "content": "
MethodCAPTURErealCAPTUREsynthetic
GPT-4o14.759.71
GPT-4o w/ CoT14.947.73
Qwen229.3311.74
Qwen2 w/ CoT31.5737.81
" + }, + { + "type": "table_caption", + "bbox": [ + 0.576, + 0.78, + 0.843, + 0.794 + ], + "angle": 0, + "content": "Table 6. CoT experiments (metric: sMAPE)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.826, + 0.906, + 0.903 + ], + "angle": 0, + "content": "During development, we experimented with several common strategies including CoT. In Tab. 6, we find that CoT reduces model performance except in the occluded synthetic scenario, most likely because the included examples are very similar to the test prompt." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.093, + 0.089, + 0.904, + 0.251 + ], + "angle": 0, + "content": "
ModelError (%) (↓)
RealSynthetic
UnoccludedOccludedUnoccludedOccluded
Originalw/ backoff (Δ)Originalw/ backoff (Δ)Originalw/ backoff (Δ)Originalw/ backoff (Δ)
GPT-4o13.3412.57 (−0.77)14.7514.39 (−0.36)5.905.93 (+0.03)9.719.23 (−0.48)
InternVL226.1727.09 (+0.92)32.9032.37 (−0.53)16.4415.59 (−0.85)17.5716.24 (−1.33)
Molmo25.9021.23 (−4.67)32.4928.17 (−4.32)8.402.88 (−5.52)17.7315.85 (−1.88)
Qwen2VL18.9619.40 (+0.44)29.3328.47 (−0.86)6.636.66 (+0.03)11.7411.51 (−0.23)
Avg. of 4 VLMs21.0920.07 (−1.02)27.3725.85 (−1.52)9.347.76 (−1.58)14.1913.21 (−0.98)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.259, + 0.907, + 0.304 + ], + "angle": 0, + "content": "Table 7. Comparison of models on CAPTURE across four scenarios (CAPTURE\\(^{\\text{real}}\\) vs. CAPTURE\\(^{\\text{synthetic}}\\), Unoccluded vs. Occluded). \"Original\" indicates no backoff; \"w/ backoff\" indicates applying backoff, with \\(\\Delta = (w/ backoff) - (Original)\\). Negative \\(\\Delta\\) values indicate an improvement." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.328, + 0.483, + 0.361 + ], + "angle": 0, + "content": "C.2. Temperature backoff slightly improves model performance" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.368, + 0.483, + 0.461 + ], + "angle": 0, + "content": "To improve VLM performance on CAPTURE, we address a trend we established during early testing. Most of the time, the VLM fails by reaching an incorrect answer. Sometimes, however, our benchmark can cause VLMs to produce a long and irrelevant response that strays from the original prompt, leading to the worst possible sMAPE score (100%)." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.462, + 0.483, + 0.69 + ], + "angle": 0, + "content": "To reduce the number of skipped questions, we experiment with temperature backoff, which iteratively decreases the sampling temperature. Because the answer extractor can immediately identify an incoherent output, we can regenerate the response with a lower temperature to get the model to answer the task properly. Consistent with our findings, Peeperkorn et al. [32] also finds that lower temperatures increase coherence in VLMs, thereby enhancing their chances of maintaining relevance to the prompt. Therefore, temperature backoff gives VLMs a better chance of achieving higher scores. Each time the answer extractor returns an empty answer because the VLMs produced an incoherent answer, we reduce the temperature by 0.1 (starting from 1.0) until it reaches 0.0, at which point the example is skipped." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.702, + 0.483, + 0.854 + ], + "angle": 0, + "content": "Models perform slightly better with temperature backoff. We introduced temperature backoff to reduce model incoherence, and it performed fairly well. As shown in Tab. 7 (bottom), this method slightly improves performance across each model, resulting in an average error reduction of \\(5.78\\%\\) in \\(\\mathrm{CAPTURE}^{\\mathrm{real}}\\) and \\(5.45\\%\\) in \\(\\mathrm{CAPTURE}^{\\mathrm{synthetic}}\\). Temperature backoff essentially allows the model to reattempt the question if it fails to respond to the prompt. Similar to previous results, positive results from reattempts highlight VLMs' weak reasoning abilities." + }, + { + "type": "table", + "bbox": [ + 0.538, + 0.325, + 0.885, + 0.462 + ], + "angle": 0, + "content": "
ModelError (%) [↓]
All ObjectsOnly Occluded
GPT-4o14.7526.13 (+11.38)
InternVL232.9075.82 (+42.92)
Molmo32.4996.79 (+64.30)
Qwen2VL29.3332.89 (+3.56)
Avg. of 4 VLMs27.3757.91 (+30.54)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.471, + 0.907, + 0.514 + ], + "angle": 0, + "content": "Table 8. VLM sMAPE for counting all objects and counting only the occluded objects in CAPTURE\\(^{\\text{real}}\\). Metric: sMAPE (lower is better)." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.541, + 0.907, + 0.573 + ], + "angle": 0, + "content": "C.3. Models struggle at counting just occluded objects" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.579, + 0.907, + 0.716 + ], + "angle": 0, + "content": "We separately test whether models can count only the occluded objects (not including the visible objects) in an image. Here, as Tab. 8 demonstrates, the models perform especially poorly in this task, with high error rates across all models. Therefore, we can conclude that occlusion and counting are uniquely difficult for the VLMs, and that the drop in performance between unoccluded and occluded settings in Tab. 2 is likely due to a poor ability to count occluded objects." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.727, + 0.902, + 0.743 + ], + "angle": 0, + "content": "C.4. Models are overconfident in occluded settings" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.75, + 0.907, + 0.901 + ], + "angle": 0, + "content": "We test the uncertainty with two different methods of obtaining confidence on Qwen2VL. In the first method, we prompt Qwen2VL for its confidence in the answer. For the second method, we generate 20 responses for every question in our VQA and calculate the confidence as the percentage of times the most common answer was generated. These results can be seen in Fig. 9 and Fig. 10 respectively. In both reliability curves, there is a slight trend that the model's confidence is negatively correlated with the error, which is the desired outcome. 
In \\(\\mathrm{CAPTURE}^{\\mathrm{real}}\\), how-" + }, + { + "type": "page_footnote", + "bbox": [ + 0.091, + 0.875, + 0.483, + 0.901 + ], + "angle": 0, + "content": "2We set \\(T = 0.9999\\) based on manual evaluation, finding it resulted in fewer false positives." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.485, + 0.198 + ], + "angle": 0, + "content": "ever, the correlation is much stronger. While the models are somewhat calibrated (with generally lower confidence on higher-error examples, there are still outliers in prompted confidence for CAPTURE\\(^{\\text{real}}\\) occluded and sampled confidence for CAPTURE\\(^{\\text{synthetic}}\\) occluded. This indicates that not only do the models perform worse under occlusion, but they can also be overconfident." + }, + { + "type": "image", + "bbox": [ + 0.113, + 0.237, + 0.446, + 0.433 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.449, + 0.483, + 0.475 + ], + "angle": 0, + "content": "Figure 9. Reliability curve of prompting model for confidence vs. sMAPE." + }, + { + "type": "image", + "bbox": [ + 0.113, + 0.533, + 0.446, + 0.73 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.745, + 0.483, + 0.772 + ], + "angle": 0, + "content": "Figure 10. Reliability curve of sampling model for confidence vs. sMAPE." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.802, + 0.483, + 0.819 + ], + "angle": 0, + "content": "C.5. Models are biased to predict specific numbers." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.825, + 0.484, + 0.901 + ], + "angle": 0, + "content": "To examine where models frequently err, we generated a confusion matrix for every model based on CAPTURE\\(^{\\text{synthetic}}\\) results (shown in Appendix C.5). 
The y-axis represents the ground truth values and the x-axis represents the model's answers. We find that models often over-predict" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.907, + 0.259 + ], + "angle": 0, + "content": "numbers associated with common counts in real life: GPT-40 tends to predict numbers like 8, 9, 10, and 12, which are all non-prime numbers (i.e. can be arranged into a grid) and common groupings of objects. For example, 12 is a common grouping (dozens) and allows arrangements into 3x4 or 2x6 grids. InternVL and Qwen2VL over-predict 5 and 10, aligning with how humans conceptualize numbers. Indeed, Coupland [12] found that numbers 5, 10, 20, and other round numbers appear disproportionately more in online texts. Molmo has no correlation with these factors, possibly due to its unique \"point and count\" ability." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.271, + 0.667, + 0.289 + ], + "angle": 0, + "content": "D. VLM Prompts" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.296, + 0.907, + 0.327 + ], + "angle": 0, + "content": "We use a 100-example validation set for each setting to select the best prompt, which we report below." + }, + { + "type": "title", + "bbox": [ + 0.53, + 0.34, + 0.889, + 0.372 + ], + "angle": 0, + "content": "Prompt for GPT-4o on CAPTURE\\(^{\\text{real}}\\) unoccluded split." + }, + { + "type": "text", + "bbox": [ + 0.531, + 0.379, + 0.89, + 0.44 + ], + "angle": 0, + "content": "Count the exact number of [object] in the image. Assume the pattern of [object] continues behind any black box. Provide the total number of [object] as if the black box were not there." + }, + { + "type": "title", + "bbox": [ + 0.532, + 0.468, + 0.889, + 0.499 + ], + "angle": 0, + "content": "Prompt for InternVL2 on CAPTURE\\(^{\\text{real}}\\) unoccluded split." + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.507, + 0.889, + 0.539 + ], + "angle": 0, + "content": "Your task is to count objects in the image. 
First, state what the pattern is, then give your final count." + }, + { + "type": "title", + "bbox": [ + 0.532, + 0.568, + 0.888, + 0.599 + ], + "angle": 0, + "content": "Prompt for Molmo on CAPTURE\\(^{\\text{real}}\\) unoccluded split." + }, + { + "type": "text", + "bbox": [ + 0.531, + 0.607, + 0.889, + 0.668 + ], + "angle": 0, + "content": "Count the exact number of [object] in the image. Only count [object] that are visible within the frame. If [object] are partially in the frame (i.e. if any part of [object] are visible), count it." + }, + { + "type": "title", + "bbox": [ + 0.532, + 0.697, + 0.889, + 0.73 + ], + "angle": 0, + "content": "Prompt for Qwen2VL on CAPTURE\\(^{\\text{real}}\\) unoccluded split." + }, + { + "type": "text", + "bbox": [ + 0.531, + 0.737, + 0.89, + 0.873 + ], + "angle": 0, + "content": "Count the exact number of [object] in the image. Assume the pattern of [object] continues behind any black box. Provide the total number of [object] as if the black box were not there. Only count [object] that are visible within the frame (or would be visible without the occluding box). If [object] are partially in the frame (i.e. if any part of [object] are visible), count it. If the [object] would be partially in the frame without the occluding box, count it." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.092, + 0.088, + 0.496, + 0.347 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.089, + 0.902, + 0.347 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.091, + 0.35, + 0.495, + 0.611 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.351, + 0.902, + 0.61 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.205, + 0.623, + 0.793, + 0.639 + ], + "angle": 0, + "content": "Figure 11. 
Confusion matrix: predicted vs. ground truth counts for CAPTURE\\(^{\\text{real}}\\)s occluded split." + }, + { + "type": "title", + "bbox": [ + 0.11, + 0.667, + 0.466, + 0.699 + ], + "angle": 0, + "content": "Prompt for GPT-4o, InternVL2, and Qwen2VL on CAPTURE\\(^{\\text{real}}\\) occluded split." + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.706, + 0.468, + 0.889 + ], + "angle": 0, + "content": "Count the exact number of [object] in the image. Assume the pattern of [object] continues behind any black box. Provide the total number of [object] as if the black box were not there. Only count [object] that are visible within the frame (or would be visible without the occluding box). If [object] are partially in the frame (i.e. if any part of [object] are visible), count it. If the [object] would be partially in the frame without the occluding box, count it. Molmo: Your task is to count objects in the image. Assume the pattern of [object] continues behind the black box. First, state what the pattern is, then give your final count." + }, + { + "type": "title", + "bbox": [ + 0.532, + 0.669, + 0.888, + 0.701 + ], + "angle": 0, + "content": "Prompt for Molmo on CAPTURE\\(^{\\text{real}}\\) occluded split." + }, + { + "type": "text", + "bbox": [ + 0.531, + 0.708, + 0.889, + 0.768 + ], + "angle": 0, + "content": "Your task is to count objects in the image. Assume the pattern of [object] continues behind the black box. First, state what the pattern is, then give your final count." + }, + { + "type": "title", + "bbox": [ + 0.532, + 0.807, + 0.889, + 0.838 + ], + "angle": 0, + "content": "Prompt for GPT-4o on CAPTUREsynthetic unoccluded split." + }, + { + "type": "text", + "bbox": [ + 0.532, + 0.846, + 0.888, + 0.877 + ], + "angle": 0, + "content": "Your task is to count objects in the image. First, state what the pattern is, then give your final count." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.509, + 0.937 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.109, + 0.096, + 0.465, + 0.128 + ], + "angle": 0, + "content": "Prompt for InternVL2 on CAPTUREsynthetic unoccluded split." + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.136, + 0.465, + 0.197 + ], + "angle": 0, + "content": "Count the exact number of [dot shape]s in the image. Only count [dot shape]s that are visible within the frame. If [dot shape]s are partially in the frame (i.e. if any part of [dot shape]s are visible), count it." + }, + { + "type": "title", + "bbox": [ + 0.109, + 0.228, + 0.465, + 0.259 + ], + "angle": 0, + "content": "Prompt for Molmo on CAPTUREsynthetic unoccluded split." + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.268, + 0.465, + 0.312 + ], + "angle": 0, + "content": "Count the exact number of [dot shape]s in the image. Only count [dot shape]s that are visible within the frame." + }, + { + "type": "title", + "bbox": [ + 0.109, + 0.342, + 0.465, + 0.373 + ], + "angle": 0, + "content": "Prompt for Qwen2VL on CAPTUREsynthetic unoccluded split." + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.382, + 0.466, + 0.532 + ], + "angle": 0, + "content": "Count the exact number of [dot shape]s in the image. Assume the pattern of [dot shape]s continues behind any black box. Provide the total number of [dot shape]s as if the black box were not there. Only count [dot shape]s that are visible within the frame (or would be visible without the occluding box). If [dot shape]s are partially in the frame (i.e. if any part of [dot shape]s are visible), count it. If the [dot shape]s would be partially in the frame without the occluding box, count it." + }, + { + "type": "title", + "bbox": [ + 0.109, + 0.562, + 0.465, + 0.593 + ], + "angle": 0, + "content": "Prompt for GPT-4o and Molmo on CAP-TUREsynthetic occluded split." 
+ }, + { + "type": "text", + "bbox": [ + 0.108, + 0.602, + 0.465, + 0.66 + ], + "angle": 0, + "content": "Your task is to count objects in the image. Assume the pattern of [dot shape]s continues behind the black box. First, state what the pattern is, then give your final count." + }, + { + "type": "title", + "bbox": [ + 0.109, + 0.69, + 0.465, + 0.721 + ], + "angle": 0, + "content": "Prompt for InternVL2 and Qwen2VL on CAPTUREsynthetic occluded split." + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.729, + 0.466, + 0.879 + ], + "angle": 0, + "content": "Count the exact number of [dot shape]s in the image. Assume the pattern of [dot shape]s continues behind any black box. Provide the total number of [dot shape]s as if the black box were not there. Only count [dot shape]s that are visible within the frame (or would be visible without the occluding box). If [dot shape]s are partially in the frame (i.e. if any part of [dot shape]s are visible), count it. If the [dot shape]s would be partially in the frame without the occluding box, count it." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.925, + 0.508, + 0.937 + ], + "angle": 0, + "content": "15" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15485/a2c7802b-0ba4-4f59-a685-cb9d447cab8d_origin.pdf b/data/2025/2504_15xxx/2504.15485/a2c7802b-0ba4-4f59-a685-cb9d447cab8d_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..72ae05425dd4ad25e0b4279968d8fc1f959a0ce1 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/a2c7802b-0ba4-4f59-a685-cb9d447cab8d_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:701889165c85fa097d4a836f8d95aab14e8d34ed0f6099c1f75e16574290ab5b +size 988148 diff --git a/data/2025/2504_15xxx/2504.15485/full.md b/data/2025/2504_15xxx/2504.15485/full.md new file mode 100644 index 0000000000000000000000000000000000000000..8574c6f6667e7fdc16ea4d6e00555e858936dab8 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/full.md @@ -0,0 +1,440 @@ +# CAPTURE: Evaluating Spatial Reasoning in Vision Language Models via Occluded Object Counting + +Atin Pothiraj + +Elias Stengel-Eskin + +Jaemin Cho + +Mohit Bansal + +UNC Chapel Hill + +{atin, esteng, jmincho, mbansal}@cs.unc.edu + +# Abstract + +Recognizing and reasoning about occluded (partially or fully hidden) objects is vital to understanding visual scenes, as occlusions frequently occur in real-world environments and act as obstacles for spatial comprehension. To test models' ability to reason about multiple occluded objects, we introduce a novel task, Counting Amodally for Patterns Through Unseen REgions (CAPTURE), which requires a model to count objects arranged in a pattern by inferring how the pattern continues behind an occluder (an object which blocks parts of the scene). CAPTURE requires both recognizing visual patterns and reasoning, making it a useful testbed for evaluating vision-language models (VLMs) on whether they understand occluded patterns and possess spatial understanding skills. 
By requiring models to reason about occluded objects, CAPTURE also tests VLMs' ability to form world models that would allow them to fill in missing information. CAPTURE consists of two parts: (1) CAPTURE $^{\text{real}}$ , with manually filtered images of real objects in patterns and (2) CAPTURE $^{\text{synthetic}}$ , a controlled diagnostic with generated patterned images. We evaluate four strong VLMs (GPT-4o, Intern-VL2, Molmo, and Qwen2-VL) on CAPTURE, finding that models struggle to count on both occluded and unoccluded patterns. Crucially, we find that models perform worse with occlusion, suggesting that VLMs are also deficient in inferring unseen spatial relationships: even the strongest VLMs like GPT-4o fail to count with occlusion. In contrast, we find that humans achieve very little error on CAPTURE. We also find that providing auxiliary information of occluded object locations increases performance, underscoring that the model error comes both from an inability to handle occlusion as well as difficulty in counting in images. $^{1}$ + +# 1. Introduction + +Inferring what lies behind different objects in occluded scenes is crucial for human perception, as it allows us to + +![](images/981b3b6aabc89ab61a54e82e52bc90dd504b1f841da1c843d3996fb961f096a9.jpg) +Instruction: Count the exact number of cups in the image, assuming the pattern continues behind the black box. + +![](images/219a1166134d1772e966ee56c5d2d9ed4a6b2d6e102dc7a24aa55784ec37d983.jpg) +Figure 1. CAPTURE example with an output from GPT-4o. While people can easily infer the missing number of cups and correctly reason over occluded patterns, models generally struggle to reason over these occluded scenes. + +maintain a coherent understanding of our environment even when parts are hidden. The human visual system accomplishes this by integrating past experiences, context, and sensory inputs to reconstruct incomplete scenes [19, 27, 30, 45]. 
Meanwhile, recent advancements in vision-language models (VLMs) – especially in terms of visual and spatial reasoning – raise the question of whether these systems can perform similar inferential tasks. One way of measuring such capabilities is through amodal completion – the task of inferring the invisible parts of partially occluded objects; here, vision-only models are typically evaluated via dense prediction tasks like object segmentation and image inpainting [5]. However, this format is not well-suited for assessing VLMs, whose outputs consist of text tokens rather than pixel-level predictions. This raises a critical question: How can we quantify the ability of VLMs to form spatial world modeling [17] in the presence of occlusion? + +To address this, we introduce CAPTURE, Counting Amodally for Patterns Through Unseen REgions, a novel benchmark that tests a VLM's world modeling and spatial reasoning abilities through the task of amodal counting, where models are prompted to count occluded objects + +by amodally completing a pattern. CAPTURE focuses on counting as it provides an objective and easy-to-verify output by comparing predicted counts with ground truth values. Moreover, patterned objects appear in various real-world domains, especially in man-made environments like parking lots, cities, and warehouses, where counting objects is often required. Fig. 1 illustrates the CAPTURE task. We show a VLM an image where objects are placed in a regular pattern (e.g., a 4x4 grid) with some objects occluded, and ask the model to count the total number of objects in the image assuming that the pattern continues behind the occlusion. The task requires handling occlusion, pattern recognition, and counting skills that exist in humans from a fairly young age [27, 30, 45], thus humans can easily answer such questions – indeed, we find that people can complete CAPTURE tasks with almost no error. 
+ +CAPTURE consists of two subsets: $\mathrm{CAPTURE}^{\mathrm{real}}$ and $\mathrm{CAPTURE}^{\mathrm{synthetic}}$ . As shown in Fig. 2, $\mathrm{CAPTURE}^{\mathrm{real}}$ contains real-world images and tests the ability of models to perform amodal counting in naturalistic contexts, while $\mathrm{CAPTURE}^{\mathrm{synthetic}}$ allows us to analyze specific factors by controlling different variables like color, shape, and number of objects. All images in $\mathrm{CAPTURE}$ contain a pattern of objects and a manually annotated occluding black box covering some objects. $\mathrm{CAPTURE}^{\mathrm{real}}$ contains 924 images with a diverse range of settings and objects, covering 92 different object types, while $\mathrm{CAPTURE}^{\mathrm{synthetic}}$ contains 1250 images across multiple attribute classes. + +By combining vision encoders with large language models (LLMs), VLMs have the potential to reason in a zero-shot way about visual inputs. To put this ability to the test and measure VLMs' ability to reason about missing visual information, we evaluate four strong recent VLMs (GPT40, InternVL2, Molmo, and Qwen2VL) on CAPTURE. Our experiment results (Sec. 4) show that models generally struggle with the multiple aspects of the task, with high error rates on both CAPTURE $^{\text{real}}$ and CAPTURE $^{\text{synthetic}}$ for occluded and unoccluded images. In contrast, we find that humans can perform the task easily: whereas model performance deteriorates as more objects in images are occluded, humans complete the task almost perfectly. We also compare VLMs to a vision-only model trained to count visible objects; while this model generally outperforms VLMs, its error is directly tied to the number of occluded objects – the more objects are occluded, the higher its error will be. + +By objectively measuring VLMs' spatial reasoning capabilities under occlusion, CAPTURE highlights an unexpected weakness in VLMs. 
We analyze this weakness by providing the model with additional clues and information. Specifically, we test to what degree the VLMs' failure stems from an inability to integrate visual information by providing it with a text-based representation of the visible objects in the image in the form of object coordinates; here, VLMs + +perform substantially better, indicating that their poor performance on CAPTURE stems partly from an inability to count objects in images, rather than an inability to count more generally. Our findings align with previous work, which similarly finds that VLMs struggle to count in images [22, 33, 42]. We also test the degree to which VLM errors stem from an inability to form a world model by providing it with auxiliary information (the coordinates of the occluded objects in text, or inpainting the occluded regions). We find that VLMs perform substantially better with this auxiliary information, suggesting that VLMs are partly limited by their inability to imagine the missing visual information. Addressing these gaps is critical for VLMs to function effectively in real-world scenarios, where visual reasoning often involves occlusions – whether counting stadium seats, components on production lines, or buildings in neighborhoods. We hope that our work will foster future research on improving the world modeling capabilities of VLMs. + +# 2.CAPTURE + +# 2.1. Task Overview + +Input/output formulation. CAPTURE tests VLMs on occlusion reasoning, pattern recognition, and counting of both visible and occluded objects. VLMs already achieve high accuracy in classifying single, occluded objects [20]. Thus, we also argue that VLMs have the potential to perform well on CAPTURE's challenging task because their proficiency in handling occlusion ought to enable them to recognize occluded objects and reason accordingly. All images in CAPTURE contain a pattern. 
This makes the task solvable for models and people - if the objects were not placed in a pattern, it would be unreasonable to expect models to infer the position of the occluded objects. For example, given an image of a random pile of coins with a region occluded, it is not easy to infer whether the occluded region contains no coins or contains roughly the same amount as the rest of the pile. For this task, the patterns considered are all regular and fairly small, e.g. grids, circles, triangles, and other regular shapes - see Fig. 2 for further examples. The last step of CAPTURE is counting, asking the model to provide an objectively measurable output. In addition to VLMs, we also test COUNTGD [3], a state-of-the-art object detection-based counting method, finding that it fails to account for the occluded scenario, as its training entails solely predicting the visible, unoccluded objects in the image. + +Metric. We use symmetric mean percent error (sMAPE) as the primary metric. sMAPE is given by: + +$$ +\mathrm {s M A P E} = 1 0 0 \cdot \frac {1}{n} \sum_ {i = 1} ^ {n} \frac {\left| y _ {i} - \hat {y} _ {i} \right|}{\left| y _ {i} \right| + \left| \hat {y} _ {i} \right|} \tag {1} +$$ + +where $y_{i}$ represents the actual values, $\hat{y}_i$ represents the pre + +# CAPTUREreal + +Instruction: Count the exact number of [object] in the image. Assume the pattern of [object] continues behind any black box. Provide the total number of [object] as if the black box were not there. + +![](images/0320e46c96b593a455fec0826baa3f421db6d2c4a95077806655be22fb113a09.jpg) +GPT-4o: 18 bottle caps visible...black box covers...5 caps...total estimated count...is 23. Ground truth: 19 + +![](images/b7f8ce3847d0a7e3146846c8edff7e0d3871512192406b9fb9b188c9cf8bf053.jpg) +GPT-4o: Bread rolls are organized in a 4x4 grid, which suggests there are 16 rolls in total. Ground truth: 20 +GPT-4o: Total nuts in each row...6 times 6 = 36. Nuts in the hidden $3 \times 3$ section = 9. 
36 + 9 = 45 nuts. Ground truth: 42 +Figure 2. Example images with GPT-4o responses to CAPTURE $^{\text{real}}$ and CAPTURE $^{\text{synthetic}}$ occluded splits. + +![](images/fe55be050932617254da6354bbc993dec70850b06690d96aa4514239ea7aaef4.jpg) + +# CAPTUREsynthetic + +Instruction: Your task is to count objects in the image. Assume the pattern of [object] continues behind the black box. First, state what the pattern is, then give your final count. + +![](images/54a9fb14060fcc4940d0a1d77d308d09d15406b37a1ca1943cd3e02ab33800aa.jpg) +GPT-4o: Pattern: A circular arrangement... Visible count: ...8 ... Total count: ...12 dots. Ground truth: 11 + +![](images/037e79bd08c36fe6b80824bcb0caf4643a0ad47db6c01c322d5a57de9b716faa.jpg) +GPT-4o: ...pattern consists of...a 2x2 grid... can infer...an additional four blue squares in the obscured section. Final count: 8 blue squares Ground truth: 6 + +![](images/9049c59347241b0b3b291a226e884daeba5d88430388678bdb27fa8f3cd22b6c.jpg) +GPT-4o: Pattern: Triangular arrangement ...Final Count:7 red dots (5 visible $^+$ 2 estimated behind the black box). Ground truth:6 + +dicted values, and $n$ is the number of observations. sMAPE is capped at $100\%$ , providing a fixed range. This makes sMAPE ideal for challenging tasks like ours, as we can penalize responses that fail to produce an answer with a maximum error of $100\%$ . For a justification of sMAPE over other metrics, see Appendix A.1. + +# 2.2. Dataset + +CAPTURE $^{\text{real}}$ . We introduce a set of real images with patterns to test amodal counting in naturalistic settings. The original images and annotations come from the FSC-147 dataset [37], a diverse counting dataset with manual annotations for the number of target objects and all object bounding boxes in each image. FSC-147 contains a diverse array of objects, with 6146 real-world images across 147 object categories. 
We filter FSC-147 for images that contain identifiable and regular patterns of objects and manually overlay a black box to occlude some objects, resulting in 924 images. Filtering is first performed with GPT-4o and then manually verified; we also manually verify that determining objects despite the occlusion is feasible. For each example, we maintain both occluded and unoccluded versions. Further details on CAPTURE $^{\text{real}}$ can be found in Appendix B. + +CAPTUREsynthetic. While CAPTUREreal makes CAPTURE more applicable to real-world scenarios, each image is unique, making the data less controlled and challenging to draw clear conclusions about model performance. Images without background distractors, texture variance, and other potential visual obstacles provide a more controlled version of the task. Therefore, we create CAPTUREsynthetic to examine the task in a fully controlled environment. CAPTUREsynthetic comprises 1250 images of simple objects in patterns, where different variables are held constant or changed. We vary the following features: + +1. Object count: varies from 5 to 15. +2. Object: can be either dots or squares. + +
|  | CAPTURE$^{\text{real}}$ | CAPTURE$^{\text{synthetic}}$ |
| --- | --- | --- |
| # Images | 924 | 1250 |
| # Object Types | 92 | 2 |
| Avg. Occluded Obj. | 13.97 | 2.73 |
| Avg. Total Obj. | 61.45 | 10.00 |
| Strengths | Diverse Objects/Settings | Confounder-free |
|  | Naturalistic | Controllable Attributes |
|  | Realistic Context | Uniformly Distributed |
+ +Table 1. Statistics and strengths for CAPTURE splits. + +3. Arrangement/shape: can be a rectangle, circle, or pyramid (where feasible based on object count). +4. Location: we consider five positions on the page: center, top-left, top-right, bottom-left, or bottom-right. +5. Color: we randomly choose one of 5 colors for all the objects in an image. + +The $\mathrm{CAPTURE}^{\mathrm{synthetic}}$ data is split similarly to the $\mathrm{CAPTURE}^{\mathrm{real}}$ data; each configuration has a variant with an overlaid occluding box and one without. + +# 2.3. Statistics and Examples + +Fig. 2 shows examples from $\mathrm{CAPTURE}^{\mathrm{real}}$ and $\mathrm{CAPTURE}^{\mathrm{synthetic}}$ paired with their corresponding answers from GPT-4o and their ground truth answers. These examples show the range of objects and patterns in the dataset and highlight the task's feasibility for humans. Tab. 1 reports summary statistics for $\mathrm{CAPTURE}$ , including the number of images and object types, as well as the mean number of occluded and total objects in both splits of $\mathrm{CAPTURE}$ . The number of objects in $\mathrm{CAPTURE}^{\mathrm{real}}$ is shown in Fig. 3, where most images have between 0 and 30 objects. On $\mathrm{CAPTURE}^{\mathrm{synthetic}}$ , the maximum number of objects is 15, and $\mathrm{CAPTURE}^{\mathrm{synthetic}}$ images generally have 1-6 occluded objects (shown in Fig. 4, as further occlusion could make the count unresolvable). + +![](images/1bbc7a1297efec4901bf89c6b7bcce563982d0c367188c0e42f7ec57805c29ee.jpg) +Figure 3. # of objects in CAPTURE $^{\text{real}}$ images. + +![](images/b5bdfdd2908acbf8cdef268b0abf95ce0fe53e686332d8e190da4bf0e24212c7.jpg) +Figure 4. # of occluded objects in CAPTUREsynthetic images. + +# 3. Experiment Setup + +# 3.1. Models + +We experiment with GPT-4o [28], Intern-VL2-Llama3-8B [9, 10], Qwen2-VL-7B [41], MiniCPM-o 2.6 [47], and Kimi-VL-A3B [40] for their high scores on other VLM tasks [29]. 
We add Molmo 7B-D [13], because of its ability to "point and count," giving it a potential advantage on CAPTURE. Specifically, Molmo is trained on millions of examples that directly ground text to 2D coordinates (or "points") in images. This allows Molmo to directly point to image coordinates and count more easily by pointing to several objects. All the VLMs feature a different language backbone and vision encoder to provide broad coverage of model architectures. To evaluate models, we provide the model with the name of the specific object to be counted and the explicit instruction to count fully visible objects and objects behind the occluding box (in the occluded images). For each model, we test ten prompts on a validation set of 100 images, selecting the best prompt for each model in each dataset section (CAPTURE $^{\text{real}}$ /CAPTURE $^{\text{synthetic}}$ ) and for each environment (occluded/unoccluded). We provide the selected prompts in Appendix D. + +# 3.2. Answer Generation and Extraction + +Given the complex nature of CAPTURE, we allow models to generate open-ended responses and then subsequently extract answers. Further details (including the maximum number of tokens) can be found in Appendix A.2. + +Answer extraction. Empirically, we found that constraining the output to a specific format for ease of analysis neg- + +atively impacted benchmark performance. Therefore, we instead prompt models to generate freely and extract the final output number using a separate answer extractor based on Llama 3.1 8B [1]. This answer extractor takes the output from the model as input and prompts it to extract a single number representing the final answer. The answer extractor also identifies if an output failed to converge on a singular number answer and assigns a label to these examples. We mark such incomplete/incoherent model generations as 'skipped' questions and when calculating the error later, these responses are assigned the worst possible sMAPE score (100%). 
The answer extractor outputs were manually verified on 1000 outputs, and the extractor was found to be 100% accurate. + +Human and object detection baselines. We also report the performance of humans and a recent counting model (COUNTGD [3]) as baselines to establish a point of reference for model performance. To confirm that humans can perform the CAPTURE task, we provided 100 randomly selected occluded examples each from the CAPTURE $^{\text{real}}$ and CAPTURE $^{\text{synthetic}}$ subsets to 3 undergraduate students with no prior knowledge of the task. + +# 4. Results and Analysis + +# 4.1. Main Results on CAPTUREreal + +Models consistently struggle with counting and perform worse on occluded images. We run the VLMs on the occluded and unoccluded versions of CAPTURE to discern whether occlusion significantly impacts model performance. Tab. 2 shows that all models struggle with counting generally, performing poorly on both splits. Moreover, we see that every model performs better on the unoccluded images. On average, the models perform $6.28\%$ worse in $\mathrm{CAPTURE}^{\mathrm{real}}$ occluded images and $4.85\%$ worse in $\mathrm{CAPTURE}^{\mathrm{synthetic}}$ occluded images (in terms of absolute sMAPE), indicating increased difficulty from a standard counting task. The best model for both splits, GPT-4o, has an error rate of $14.75\%$ on $\mathrm{CAPTURE}^{\mathrm{real}}$ and a lower error rate of $9.71\%$ on $\mathrm{CAPTURE}^{\mathrm{synthetic}}$ . Across both the real and synthetic split, GPT-4o's error increases with occlusion, by $1.41\%$ on the real data and $3.81\%$ on the synthetic split. Interestingly, despite its fine-tuning on counting tasks, Molmo exhibits a sizable error rate of $32.5\%$ on $\mathrm{CAPTURE}^{\mathrm{real}}$ occluded images. The high error rates of VLMs indicate limited capabilities in visual understanding under occlusions, pattern recognition, and counting. 
We further analyze the source of these errors with oracle experiments in Sec. 4.3. + +Humans complete the task with almost no error. Tab. 3, evaluated on a 100-example subset of each split, confirms that humans complete the task with ease despite occlusion, + +
| Model (Error (%) [↓]) | CAPTURE$^{\text{real}}$ Original | CAPTURE$^{\text{real}}$ w/ Occlusion (Δ) | CAPTURE$^{\text{synthetic}}$ Original | CAPTURE$^{\text{synthetic}}$ w/ Occlusion (Δ) |
| --- | --- | --- | --- | --- |
| GPT-4o | 13.34 | 14.75 (+1.41) | 5.90 | 9.71 (+3.81) |
| InternVL2 | 26.17 | 32.90 (+6.73) | 16.44 | 17.57 (+1.13) |
| Molmo | 25.90 | 32.49 (+6.59) | 8.40 | 17.73 (+9.33) |
| Qwen2VL | 18.96 | 29.33 (+10.37) | 6.63 | 11.74 (+5.11) |
| MiniCPM-o 2.6 | 23.84 | 30.08 (+6.24) | 17.06 | 19.00 (+1.94) |
| Kimi-VL-A3B | 23.48 | 25.96 (+2.48) | 16.91 | 18.07 (+1.16) |
| Avg. of 6 VLMs | 21.95 | 27.59 (+5.64) | 11.89 | 15.64 (+3.75) |
+ +Table 2. Results across VLMs on all splits of CAPTURE, with average error for each column. Metric: sMAPE (lower is better). + +
| Model (Error (%) [↓]) | CAPTURE$^{\text{real}}$ | CAPTURE$^{\text{synthetic}}$ |
| --- | --- | --- |
| *(Baseline)* |  |  |
| Human | 3.79 | 0.92 |
| *(VLMs)* |  |  |
| GPT-4o | 14.75 | 9.71 |
| InternVL2 | 32.90 | 17.57 |
| Molmo | 32.49 | 17.73 |
| Qwen2VL | 29.33 | 11.74 |
| Avg. of 4 VLMs | 27.37 | 14.19 |
+ +Table 3. Human baseline vs VLMs on CAPTURE $^{\text{real}}$ and CAPTURE $^{\text{synthetic}}$ (occluded split). Metric: sMAPE (lower is better). + +with an sMAPE of $3.79\%$ on $\mathrm{CAPTURE}^{\mathrm{real}}$ and $0.92\%$ on $\mathrm{CAPTURE}^{\mathrm{synthetic}}$ . On the same subset of examples, models performed 7 times worse on $\mathrm{CAPTURE}^{\mathrm{real}}$ and 14 times worse on $\mathrm{CAPTURE}^{\mathrm{synthetic}}$ than humans, underscoring the gap between VLMs and humans in this task. + +Object detection-based baseline outperforms VLMs. We attempt the task with a strong object detection-based model to highlight that a standard counting approach will experience a greater loss going from unoccluded to occluded environments, as it cannot capture any occluded objects, i.e. cannot reason. We choose COUNTGD [3], the top solution for unoccluded counting on FSC-147, on which it was trained. Because we draw our images from FSC-147's train and test sets, and COUNTGD trains on FSC-147, we only evaluate COUNTGD on the subset of our data sourced from the FSC-147 test set, consisting of 149 images. + +We find that COUNTGD deteriorates by $7.19\%$ on occluded images, increasing from $3.15\%$ sMAPE to $10.34\%$ as observed in Fig. 5. As expected, COUNTGD outperforms all VLMs on the unoccluded split as it is trained for counting on FSC-147. COUNTGD also outperforms the VLMs on the occluded split, reinforcing that only counting the visible objects is a hard-to-beat baseline. However, the drop in performance with occlusion is greater than the average + +![](images/6852ecb13e232280415a379065d7456c9457cca7da26b7d7923a10788f9ddee8.jpg) +Figure 5. VLM vs. VLM + CountGD hybrid on questions from the CAPTURE $^{\text{real}}$ (occluded split) that are not in COUNTGD training set. Metric: sMAPE (lower is better). 
+ +VLM's drop, highlighting a disadvantage of non-reasoning solutions on CAPTURE: their error is necessarily tied directly to the number of occluded objects and they cannot address the task on their own, whereas a VLM might be able to infer missing objects via reasoning. + +Hybrid VLM counting systems improve performance. Finding that COUNTGD is far better at counting visible objects than VLMs, we leverage the advantage that COUNTGD has by feeding its visible object count information to the VLMs as part of the prompt. As expected, Fig. 5 illustrates that there is a considerable decrease in error when CountGD and the VLMs are combined. However, this hybrid system still performs worse than COUNTGD alone, indicating VLMs are still subpar even at counting just occluded objects (as further reinforced by Appendix C.3). + +# 4.2. Effect of Data Factors on VLM Performance + +Here, we use the CAPTURE $^{\text{synthetic}}$ data (which can be controlled precisely and fully annotated) to examine which features correlate with model performance. We test the effect of the following variables on final performance: (1) Increasing the number of occluded objects; (2) Varying the pattern. We also investigate whether models can classify patterns, and to what degree models can predict the number of occluded objects only (rather than the total). + +Models perform worse when more dots are occluded. In Fig. 6 (right), we observe that error increases with re + +![](images/d1ff88f8e06685dc0b485224cacfa612ed34e24c8db13ff474eb31b034de7999.jpg) +Figure 6. Effect of number of total objects in the image and number of occluded objects on sMAPE from $\mathrm{CAPTURE}^{\mathrm{synthetic}}$ (occluded split). Metric: sMAPE (lower is better). + +![](images/0aacafaba92370dae1f0e5b5f4e96b3961bd610b4f253bcf291566faf27d3a59.jpg) + +![](images/6dcb61ccb0400b9be87716c1ec763a1aee18b1ea03c0a515f3207f2846c30ff6.jpg) +Figure 7. Effect of pattern type in CAPTURE $^{\text{synthetic}}$ (occluded split) on sMAPE. 
Metric: sMAPE (lower is better). + +spect to the number of occluded dots. However, Fig. 6 (left) also shows that performance is less affected by the total number of dots. This suggests that the task difficulty is more closely correlated with the difficulty of occlusion – i.e. the difficulty of the world modeling task – rather than the complexity of the pattern. Some models, such as GPT-4o, deviate from this trend, which has lower error on specific numbers. We further explore model bias in Appendix C.5. + +Performance depends on pattern type. The controllability of $\mathrm{CAPTURE}^{\mathrm{synthetic}}$ allows us to measure the effect of pattern type on performance. In Fig. 7, we find that model performance differs across shapes with some regularity: objects arranged in a circle generally have lower sMAPE than other shapes, across all models. Qwen2VL has an espe + +
| Model (Accuracy (%) [↑]) | Original | w/ Occlusion (Δ) |
| --- | --- | --- |
| GPT-4o | 84.00 | 78.52 (-5.48) |
| InternVL2 | 68.52 | 47.48 (-21.04) |
| Molmo | 80.70 | 65.22 (-15.48) |
| Qwen2VL | 88.35 | 86.43 (-1.92) |
| Avg. of 4 VLMs | 80.39 | 69.41 (-10.98) |
+ +Table 4. VLM accuracy in identifying the correct pattern in CAPTUREsynthetic. Metric: accuracy (higher is better). + +cially large decrease in error when given circular arrangements compared to rectangles or triangles. + +Models can identify patterns. To determine how much model errors can be attributed to a lack of pattern recognition ability, we formulate a separate task where models must recognize the pattern in the image on CAPTURE $^{\text{synthetic}}$ . Here, we frame the task as multiple-choice, asking the model to select from the pattern types available (rectangle, triangle, or circle). Table 4 illustrates that all perform substantially better than random at this task, with most models except InternVL2 achieving accuracy above $80\%$ in the unoccluded setting. As expected, the patterns were easier to identify in unoccluded scenarios, with models suffering an average accuracy drop of $10.95\%$ in the occluded setting. Notably, GPT-4o and Qwen2VL have a fairly small drop in performance, suggesting they can generally capture the pattern even in the presence of occlusion. + +# 4.3. Analysis with Auxiliary Information + +In Sec. 4.1, we see that models broadly struggle with amodal counting. Here, we seek to disentangle whether this problem results from a failure to reason, the absence of a world model, or both by giving VLMs two different types of auxiliary information: oracle information and predicted information. Oracle information is ground truth and is directly pulled from CAPTURE's metadata, e.g., object locations. Predicted information generates new information from a completely separate model and gives it to the VLM. This information is not ground truth and is sourced from an external model, such as an image inpainting model, rather than the VLM. 
By giving the model auxiliary information in the form of reasoning and spatial clues, we can establish how much of each model's error results from an inability to handle occlusion rather than an inability to recognize and count visible objects. + +Oracle setup. We test two oracles for $\mathsf{CAPTURE}^{\mathsf{real}}$ 's occluded split based on its constituent subtasks: counting the visible objects and inferring/counting occluded objects. Both oracles provide the VLM with text-based coordinates + +![](images/c9dcd23e293902294ad0bb304302126a7e2ee754cdd92911fd7e5aabcfc11d68.jpg) +With Occlusion +With Occlusion Prompt: Count the exact number of cans in the image. Assume the pattern of cans continues behind any black box. Provide the total number of cans as if the black box were not there. +Figure 8. Example image and text inputs for experiments with auxiliary information experiments (Sec. 4.3). Blue eyes indicate objects for which the All Object Coordinate Oracle or Visible Object Coordinate Oracle extracts coordinates. The brighter part of the image represents the area which Inpainting Pipeline fills in. Example prompts are shown in italics. Blue eye overlays and faded parts of images are for demonstration purposes and are not passed with the image. + +![](images/fdce1f292eb8c63e5d8886496c488350aad21c286ba6461c0abc5dfe7d1ccd31.jpg) +All Object Coordinate Oracle +(w/Oracle information) +All Object Coordinate Oracle Prompt: Count the exact number of cans in the image, including behind the black box... Coordinates of all cans:59,43219,38356,43 522,3663,18073,335214 186),379,184),524,177220 332372,329525,325 + +![](images/d862b1fc3435e11a6f995250f084863846b2a1867e3e95f87cda3d508c3650df.jpg) +Visible Object Coordinate Oracle +(w/Oracle information) +Visible Object Coordinate +Oracle Prompt: Count the exact number of cans in the image, including behind the black box... 
+Coordinates of visible cans: (59, 43), (219, 38), (356, 43), (522, 36), (63, 180), (73, 335) + +![](images/8578bf7e10fea3496bee51ad25b496e2829784d6fda6ca668364777004884e8c.jpg) +Inpainting Pipeline +(w/ Predicted information) +Inpainting Pipeline Prompt: +Count the exact number of cans in the image. +(Fading added only for emphasis to visualize infilling. Final image given to VLM is not faded) + +
| Model | Original | w/ Occlusion | Oracle: + All Coordinates (Δ) | Oracle: + Visible Coordinates (Δ) | Predicted: + Inpainting (Δ) |
| --- | --- | --- | --- | --- | --- |
| GPT-4o | 13.34 | 14.75 | 2.93 (-11.82) | 9.20 (-5.55) | 15.89 (+1.14) |
| InternVL2 | 26.17 | 32.90 | 17.48 (-15.42) | 25.13 (-7.77) | 31.12 (-1.78) |
| Qwen2VL | 18.96 | 29.33 | 9.62 (-19.71) | 17.70 (-11.63) | 22.64 (-6.69) |
| Avg. of 3 VLMs | 19.49 | 25.66 | 10.01 (-15.65) | 17.34 (-8.32) | 23.22 (-2.44) |
+ +Table 5. Effect of auxiliary information on occluded CAPTURE ${}^{\text{real. }}\Delta =$ (Auxiliary Information) - (w/ Occlusion). Metric: sMAPE. + +of objects in the image, simplifying the visual task by assuming the VLM effectively has a perfect visual system that can recognize and localize objects in the image. The first oracle, the Visible Object Coordinate Oracle, gives the VLM the coordinates of all unoccluded objects (encoded as text, as seen in Fig. 8) and instructs the model to estimate the number of occluded objects, count the number of visible object coordinates, and add the two. In other words, the model is given oracle information about what objects are visible, thus also revealing key information about the pattern. The second oracle, the All Object Coordinate Oracle, instead gives the model the coordinates of all objects. Here, the model only needs to count the coordinates in the prompt, eliminating the need to reason on the visual input. Note that Molmo is excluded in these tests because it contains a prompt limit that would truncate the list of coordinates. An example of the oracle inputs can be seen in Fig. 8. + +Prediction setup. In this setting, we provide the VLM with an external world model representation predicted by another model. Specifically, we develop the Inpainting Pipeline to fill in the occluded region via a diffusion-based inpainting model and pass the inpainted image to the VLMs. For the inpainting model, we choose FLUX.1-Fill [dev], + +whose backbone FLUX.1 [dev] [21] is a top public model in the Text to Image Model Arena [7]. An example input to the VLM can be seen on the far-right of Fig. 8. + +Providing visible or all object coordinates improves performance substantially. The results in Tab. 2 indicate that models struggle on CAPTURE, which requires identifying a pattern and counting both visible and occluded objects. Moreover, models generally struggle with counting even in unoccluded settings. 
Both oracles simplify the counting task: All Object Coordinate Oracle reduces the task to simply counting coordinates with no reasoning involved, and Visible Object Coordinate Oracle similarly simplifies the task for visible objects, while still requiring inferring occluded objects. Additionally, under Visible Object Coordinate Oracle, recognizing the pattern shifts from a visual reasoning task to an augmented math problem. Instead of visually reasoning about where objects are located, the VLM considers what patterns the coordinates could make. Translating this task into a text problem results in an average increase of $15\%$ with all objects coordinate oracle; the errors LLMs make here are due to an inability to count in the text prompt, as opposed to weaknesses in handling occlusion (since all object coordinates are given), and the strongest + +model, GPT-4o, achieves minimal error here. We also obtain an average increase of $8\%$ with the visible objects coordinate oracle (shown in Tab. 5), possibly because it allows the more powerful LLM backbone (which is far larger than the vision model in all models tested) to complete the counting task. Taken together, these results suggest that there is much room for improvement in visual world modeling beyond text-based reasoning of VLMs. + +Providing diffusion-based inpainting improves performance marginally. Similar to the object coordinate oracles, the Inpainting Pipeline (rightmost columns in Fig. 8 and Tab. 5) eliminates the need for world modeling and provides VLMs with an approximation of the image behind the occluder. With the inpainted images, VLM error decreases by almost $2\%$ for InternVL2 and $7\%$ for Qwen2VL compared to the original occluded images. GPT-4o's error increases on inpainted images by a small margin; we hypothesize that this may be because GPT-4o has one of the better world models (based on its superior performance), and thus does not improve further with the inpainted images. 
Moreover, every VLM still falls short of its unoccluded image performance, indicating that the diffusion model is not a perfect world model. Qualitatively, we find that the inpainting model sometimes fails to output the correct pattern. + +# 5. Related Work + +Spatial reasoning in visual question answering. Past work measures the spatial reasoning capabilities of VLMs in the form of visual question answering (VQA) [4, 16] benchmarks. SpartQA [26] asks VLMs to identify the spatial relation (e.g., above, behind, left of) between objects in synthetically created 2D images from NLVR [39]. More recent benchmarks test similar spatial relation understanding with real images [2, 24, 36]. While this past work asks models to provide a text description for a relation between two fully observed objects, CAPTURE measures the world modeling from a partially observed scene, thus requiring the handling of occlusion, pattern recognition, and counting. Together, these constitute a stricter test of spatial reasoning than typical VQA settings. + +Amodal completion. Occlusions are common in natural scenes, and vision solutions for amodal completion have made significant progress in infilling occlusions [6, 38, 46]. The amodal completion task has evolved from simply completing a shape to filling in appearance (e.g., texture, color, etc.) to finally dealing with fine-grained order perception (multiple stacked occluded objects) [5]. Specifically in Qiu and Di [34], VLMs classify the hidden objects and extract fine details from occluded items. CAPTURE, however, presents a unique category of patterned amodal counting which requires inferring fully occluded objects based on a + +pattern rather than inferring occluded object wholes based on object parts. In other words, previous work has only attempted tasks that require amodal completion for one object at a time [31, 38, 46], whereas CAPTURE handles multiple objects. 
Multi-object amodal completion is crucial because in cluttered scenes, entire groups of objects are often occluded. Moreover, the output space of CAPTURE is language (rather than filling pixels). + +Counting with vision-and-language models. Within the task of counting, the most similar application to CAPTURE is dense counting, where the objects to be counted occlude each other. There are many practical applications of such a task, like counting cells on a crowded slide [8], determining crop yields from densely-packed fields [43], or crowd counting [14, 44, 48]. Liang et al. [23] improved crowd counting with an augmented CLIP [35], i.e. also using VLMs for counting. Additionally, Jenkins et al. [18] introduced an amodal counting benchmark, presenting an occluded 3D counting task where models must count objects on retail shelves. However, our work differs in many ways, as Jenkins et al. [18] only counts retail shelves and uses Li-DAR input. More broadly, dense counting focuses on overlapping objects rather than on counting objects arranged into patterns, which is the focus of CAPTURE. + +# 6. Conclusion + +We introduced CAPTURE, a novel benchmark for amodal counting that measures spatial reasoning capabilities under occlusion. CAPTURE is designed to assess VLMs' ability to form a robust world model and use that model for visual reasoning skills under occlusion. By testing counting, we cast the problem as a measurable task with an objective correct answer that also has real-world utility as VLMs become more broadly adopted. Our results suggest that VLMs struggle to combine reasoning, counting, and world modeling with low performance on occluded and unoccluded images. Our analysis indicates that models improve with oracle information about visible objects (simplifying the reasoning/counting tasks) and predicted information about the occluded objects (also simplifying world modeling), pointing to directions of model improvement. 
+ +# Acknowledgments + +This work was supported by DARPA ECOLE Program No. HR00112390060, NSF-CAREER Award 1846185, NSF-AI Engage Institute DRL-2112635, DARPA Machine Commonsense (MCS) Grant N66001-19-2-4031, ARO Award W911NF2110220, ONR Grant N00014-23-1-2356, Microsoft Accelerate Foundation Models Research (AFMR) grant program, and a Bloomberg Data Science PhD Fellowship. The views contained in this article are those of the authors and not of the funding agency. + +# References + +[1] AI@Meta. Llama 3.1 model card. *Github Model Card*, 2024. 4 +[2] Haider Al-Tahan, Quentin Garrido, Randall Balestriero, Diane Bouchacourt, Caner Hazirbas, and Mark Ibrahim. Unibench: Visual reasoning requires rethinking vision-language beyond scaling. arXiv preprint arXiv:2408.04810, 2024. 8 +[3] Niki Amini-Naeni, Tengda Han, and Andrew Zisserman. Countgd: Multi-modal open-world counting. arXiv preprint arXiv:2407.04619, 2024. 2, 4, 5 +[4] Stanislaw Antol, Aishwarya Agrawal, Jiasen Lu, Margaret Mitchell, Dhruv Batra, C Lawrence Zitnick, and Devi Parikh. Vqa: Visual question answering. In Proceedings of the IEEE international conference on computer vision, pages 2425-2433, 2015. 8 +[5] Jiayang Ao, Qiuhong Ke, and Krista A Ehinger. Image amodal completion: A survey. Computer Vision and Image Understanding, 229:103661, 2023. 1, 8 +[6] Jiayang Ao, Yanbei Jiang, Qiuhong Ke, and Krista A Ehinger. Open-world amodal appearance completion. arXiv preprint arXiv:2411.13019, 2024. 8 +[7] Artificial Analysis. Text to image model arena, 2025. Accessed: April 10, 2025. 7 +[8] Soumen Bera. Partially occluded object detection and counting. In Proceedings of the 2015 Third International Conference on Computer, Communication, Control and Information Technology (C3IT), pages 1-6. IEEE, 2015. 8 +[9] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, Bin Li, Ping Luo, Tong Lu, Yu Qiao, and Jifeng Dai. 
Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. arXiv preprint arXiv:2312.14238, 2023. 4 +[10] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 4 +[11] Davide Chicco, Matthijs J Warrens, and Giuseppe Jurman. The coefficient of determination r-squared is more informative than smape, mae, mape,mse and rmse in regression analysis evaluation. Peerj computer science, 7:e623, 2021. 11 +[12] Nikolas Coupland. How frequent are numbers? Language & Communication, 31(1):27-37, 2011. 13 +[13] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, et al. Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv preprint arXiv:2409.17146, 2024. 4 +[14] Zheyi Fan, Zihao Song, Di Wu, and Yixuan Zhu. Multibranch segmentation-guided attention network for crowd counting. Journal of Visual Communication and Image Representation, 97:103964, 2023. 8 +[15] Benito E Flores. A pragmatic view of accuracy measurement in forecasting. Omega, 14(2):93-98, 1986. 11 + +[16] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Bartra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6904-6913, 2017. 8 +[17] David Ha and Jürgen Schmidhuber. Recurrent world models facilitate policy evolution. Advances in neural information processing systems, 31, 2018. 1 +[18] Porter Jenkins, Kyle Armstrong, Stephen Nelson, Siddhesh Gotad, J Stockton Jenkins, Wade Wilkey, and Tanner Watts. Countnet3d: A 3d computer vision approach to infer counts of occluded objects. 
In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3008-3017, 2023. 8 +[19] Gaetano Kanizsa, Paolo Legrenzi, and Paolo Bozzi. Organization in vision: essays on gestalt perception. Praeger, 1979. 1 +[20] Kaleb Kassaw, Francesco Luzi, Leslie M Collins, and Jordan M Malof. Are deep learning models robust to partial object occlusion in visual recognition tasks? arXiv preprint arXiv:2409.10775, 2024. 2 +[21] Black Forest Labs. Flux. https://github.com/black-forest-labs/flux, 2024.7 +[22] Baiqi Li, Zhiqiu Lin, Wenxuan Peng, Jean de Dieu Nyandwi, Daniel Jiang, Zixian Ma, Simran Khanuja, Ranjay Krishna, Graham Neubig, and Deva Ramanan. Naturalbench: Evaluating vision-language models on natural adversarial samples. arXiv preprint arXiv:2410.14669, 2024. 2 +[23] Dingkang Liang, Jiahao Xie, Zhikang Zou, Xiaqing Ye, Wei Xu, and Xiang Bai. Crowdclip: Unsupervised crowd counting via vision-language model. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2893-2903, 2023. 8 +[24] Fangyu Liu, Guy Edward Toh Emerson, and Nigel Collier. Visual spatial reasoning. Transactions of the Association for Computational Linguistics, 2023. 8 +[25] Baraka Jacob Maiseli. Optimum design of chamfer masks using symmetric mean absolute percentage error. EURASIP Journal on Image and Video Processing, 2019(1):74, 2019. 11 +[26] Roshanak Mirzaee and Hossein Rajaby. Spartqa: A textual question answering benchmark for spatial reasoning. In The 2021 Annual Conference of the North American Chapter of the Association for Computational Linguistics (NAACL-2021), 2021. 8 +[27] Ingrid R Olson, J Christopher Gatenby, Hoi-Chung Leung, Pawel Skudlarski, and John C Gore. Neuronal representation of occluded objects in the human brain. Neuropsychologia, 42(1):95-104, 2004. 1, 2 +[28] OpenAI. Hello gpt-4o, 2024. 4 +[29] OpenCompass Team. Openvlm leaderboard. https://huggingface.co/spaces/opencompass/open_vlmleaderboard, 2024. 
Accessed: 2024-11-13. 4 +[30] Yumiko OTSUKA, So KANAZAWA, and Masami K YAMAGUCHI. Development of modal and amodal completion in infants. Perception (London. Print), 35(9):1251-1264, 2006. 1, 2 + +[31] Ege Ozguroglu, Ruoshi Liu, Dídac Surís, Dian Chen, Achal Dave, Pavel Tokmakov, and Carl Vondrick. pix2gestalt: Amodal segmentation by synthesizing wholes. In 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3931-3940. IEEE Computer Society, 2024. 8 +[32] Max Peeperkorn, Tom Kouwenhoven, Dan Brown, and Anna Jordanous. Is temperature the creativity parameter of large language models? arXiv preprint arXiv:2405.00492, 2024. 12 +[33] Muhammad Fetrat Qharabagh, Mohammadreza Ghofrani, and Kimon Fountoulakis. Lvlm-count: Enhancing the counting ability of large vision-language models. arXiv preprint arXiv:2412.00686, 2024. 2 +[34] Wenmo Qiu and Xinhan Di. Occ-mlm: Empowering multimodal large language model for the understanding of occluded objects. arXiv preprint arXiv:2410.01261, 2024. 8 +[35] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 8 +[36] Navid Rajabi and Jana Kosecka. Gsr-bench: A benchmark for grounded spatial reasoning evaluation via multimodal llms. arXiv preprint arXiv:2406.13246, 2024. 8 +[37] Viresh Ranjan, Udbhav Sharma, Thu Nguyen, and Minh Hoai. Learning to count everything. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3394-3403, 2021. 3 +[38] Kaziwa Saleh, Sándor Szenási, and Zoltán Vámossy. Mask guided gated convolution for amodal content completion. In 2024 IEEE 22nd Jubilee International Symposium on Intelligent Systems and Informatics (SISY), pages 000321-000326. IEEE, 2024. 
8 +[39] Alane Suhr, Mike Lewis, James Yeh, and Yoav Artzi. A corpus of natural language for visual reasoning. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 217-223, Vancouver, Canada, 2017. Association for Computational Linguistics. 8 +[40] Kimi Team, Angang Du, Bohong Yin, Bowei Xing, Bowen Qu, Bowen Wang, Cheng Chen, Chenlin Zhang, Chenzhuang Du, Chu Wei, Congcong Wang, Dehao Zhang, Dikang Du, Dongliang Wang, Enming Yuan, Enzhe Lu, Fang Li, Flood Sung, Guangda Wei, Guokun Lai, Han Zhu, Hao Ding, Hao Hu, Hao Yang, Hao Zhang, Haoning Wu, Haotian Yao, Haoyu Lu, Heng Wang, Hongcheng Gao, Huabin Zheng, Jiaming Li, Jianlin Su, Jianzhou Wang, Jiaqi Deng, Jiezhong Qiu, Jin Xie, Jinhong Wang, Jingyuan Liu, Junjie Yan, Kun Ouyang, Liang Chen, Lin Sui, Longhui Yu, Mengfan Dong, Mengnan Dong, Nuo Xu, Pengyu Cheng, Qizheng Gu, Runjie Zhou, Shaowei Liu, Sihan Cao, Tao Yu, Tianhui Song, Tongtong Bai, Wei Song, Weiran He, Weixiao Huang, Weixin Xu, Xiaokun Yuan, Xingcheng Yao, Xingzhe Wu, Xinxing Zu, Xinyu Zhou, Xinyuan Wang, Y. Charles, Yan Zhong, Yang Li, Yangyang Hu, Yanru Chen, Yejie Wang, Yibo Liu, Yibo Miao, Yidao Qin, Yimin Chen + +Yiping Bao, Yiqin Wang, Yongsheng Kang, Yuanxin Liu, Yulun Du, Yuxin Wu, Yuzhi Wang, Yuzi Yan, Zaida Zhou, Zhaowei Li, Zhejun Jiang, Zheng Zhang, Zhilin Yang, Zhiqi Huang, Zihao Huang, Zijia Zhao, and Ziwei Chen. Kimi-VL technical report, 2025. 4 +[41] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 4 +[42] Wei-Yao Wang, Zhao Wang, Helen Suzuki, and Yoshiyuki Kobayashi. 
Seeing is understanding: Unlocking causal attention into modality-mutual attention for multimodal llms. arXiv preprint arXiv:2503.02597, 2025. 2 +[43] Yiding Wang, Yuxin Qin, and Jiali Cui. Occlusion robust wheat ear counting algorithm based on deep learning. Frontiers in Plant Science, 12:645899, 2021. 8 +[44] Yongjie Wang, Feng Wang, and Dongyang Huang. Dual-branch counting method for dense crowd based on self-attention mechanism. Expert Systems with Applications, 236:121272, 2024. 8 +[45] Karen Wynn. Children's understanding of counting. Cognition, 36(2):155-193, 1990. 1, 2 +[46] Katherine Xu, Lingzhi Zhang, and Jianbo Shi. Amodal completion via progressive mixed context diffusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9099-9109, 2024. 8 +[47] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, et al. Minicpm-v: A gpt-4v level mllm on your phone. arXiv preprint arXiv:2408.01800, 2024. 4 +[48] Lifang Zhou, Songlin Rao, Weisheng Li, Bo Hu, and Bo Sun. Multi-branch progressive embedding network for crowd counting. Image and Vision Computing, page 105140, 2024. 8 + +# Appendix + +# A. Implementation Details + +# A.1. Metric Details + +We use symmetric mean absolute percentage error (sMAPE) as the primary metric for our benchmarks due to its resistance to bias for under/over predictions and small/large ground truths [25]. The standard metric for a counting benchmark is mean absolute error (MAE). MAE is popular, but heavily penalizes predictions that deviate by a small margin from big ground truths, highlighting the necessity for a metric that gives equal weighting to all questions. Mean absolute percentage error (MAPE) initially seems appealing but is disproportionately inflated for small ground truths and is biased towards overpredictions. 
Mean square error (MSE) and root mean square error (RMSE) are also commonly used but are very sensitive to outliers because they square the error. Intuitively, performing well on almost all questions and poorly on a small subset should score better than consistently being wrong. Among commonly-used metrics, sMAPE is the only metric that evaluates performance in relation to the distribution of ground truth elements [11]. There are two common definitions [15] for sMAPE, but we use the one that scales to $100\%$ . sMAPE is given by: + +$$ +\mathrm{sMAPE} = 100 \cdot \frac{1}{n} \sum_{i=1}^{n} \frac{\left| y_{i} - \hat{y}_{i} \right|}{\left| y_{i} \right| + \left| \hat{y}_{i} \right|} \tag{2} +$$ + +where $y_{i}$ represents the actual values, $\hat{y}_i$ represents the predicted values, and $n$ is the number of observations. sMAPE is capped at $100\%$ , providing a finite scoring range. This feature is ideal for challenging tasks like ours, as it penalizes model responses that fail to produce an answer. + +# A.2. Output Tokens + +To maximize the VLM's chance at success, we allocate a high number of output tokens to generate a rationale and output. This varies per model. We give 4000 tokens to InternVL2, 2000 tokens to Molmo, and 8192 tokens to Qwen2VL, following their max output lengths. For GPT-4o, we use the default of 4096 tokens. + +# B. CAPTURE Dataset Creation Details + +The following expands upon Sec. 2.2. While FSC-147, a diverse counting dataset with manual annotations, is a strong starting point, it cannot immediately be adapted to our task. To make the task of amodal counting solvable, our dataset requires images with patterns in them. A person (or model) can infer how the pattern would continue and thus accurately predict the total number. For questions to be answerable, the dataset's images must be filtered down to represent patterns a model or person could recognize. + +Our filtering process follows two stages. 
First, we prompt GPT-4o to determine whether the objects were arranged in a pattern. Second, if the model responded with "no", the images were immediately discarded. If the model output was "yes", the log probability of the token is stored. Empirically, we found that higher log probability values (i.e. higher confidence scores) corresponded to more well-defined patterns in the image. Thus, we use the log probabilities for filtering. + +Specifically, let $P_{\mathrm{yes}}$ be the log probability of the "yes" token and $T$ denote the threshold for determining how well-defined a pattern is. To filter the images based on pattern rigidity, we apply the following condition: $e^{P_{\mathrm{yes}}} \geq T$ . This inequality yields 991 images from the original dataset (16.12%). Next, we manually filter each of the selected images to ensure that they indeed contain patterns and feature a countable number of objects, excluding 34 images. Afterward, we manually place a "fair" occluding box in each image, i.e. a box that leaves sufficient portions of the pattern visible, such that the pattern can still be inferred from the unoccluded portions of the image. Occluding boxes were also chosen with varying positions and sizes in the image. + +# C. Additional Analysis + +Here we provide additional experiments that attempt to either increase model performance on CAPTURE or dissect the reasons behind poor model performance. Chain-of-Thought inhibits model performance, while temperature backoff slightly improves performance. Additionally, we find that models struggle at counting just occluded objects, are overconfident in occluded settings, and are biased to predict specific numbers. + +# C.1. Chain-of-Thought reduces model performance + +
MethodCAPTURErealCAPTUREsynthetic
GPT-4o14.759.71
GPT-4o w/ CoT14.947.73
Qwen229.3311.74
Qwen2 w/ CoT31.5737.81
+ +Table 6. CoT experiments (metric: sMAPE). + +During development, we experimented with several common strategies including CoT. In Tab. 6, we find that CoT reduces model performance except in the occluded synthetic scenario, most likely because the included examples are very similar to the test prompt. + +
ModelError (%) (↓)
RealSynthetic
UnoccludedOccludedUnoccludedOccluded
Originalw/ backoff (Δ)Originalw/ backoff (Δ)Originalw/ backoff (Δ)Originalw/ backoff (Δ)
GPT-4o13.3412.57 (−0.77)14.7514.39 (−0.36)5.905.93 (+0.03)9.719.23 (−0.48)
InternVL226.1727.09 (+0.92)32.9032.37 (−0.53)16.4415.59 (−0.85)17.5716.24 (−1.33)
Molmo25.9021.23 (−4.67)32.4928.17 (−4.32)8.402.88 (−5.52)17.7315.85 (−1.88)
Qwen2VL18.9619.40 (+0.44)29.3328.47 (−0.86)6.636.66 (+0.03)11.7411.51 (−0.23)
Avg. of 4 VLMs21.0920.07 (−1.02)27.3725.85 (−1.52)9.347.76 (−1.58)14.1913.21 (−0.98)
+ +# C.2. Temperature backoff slightly improves model performance + +To improve VLM performance on CAPTURE, we address a trend we established during early testing. Most of the time, the VLM fails by reaching an incorrect answer. Sometimes, however, our benchmark can cause VLMs to produce a long and irrelevant response that strays from the original prompt, leading to the worst possible sMAPE score (100%). + +To reduce the number of skipped questions, we experiment with temperature backoff, which iteratively decreases the sampling temperature. Because the answer extractor can immediately identify an incoherent output, we can regenerate the response with a lower temperature to get the model to answer the task properly. Consistent with our findings, Peeperkorn et al. [32] also finds that lower temperatures increase coherence in VLMs, thereby enhancing their chances of maintaining relevance to the prompt. Therefore, temperature backoff gives VLMs a better chance of achieving higher scores. Each time the answer extractor returns an empty answer because the VLMs produced an incoherent answer, we reduce the temperature by 0.1 (starting from 1.0) until it reaches 0.0, at which point the example is skipped. + +Models perform slightly better with temperature backoff. We introduced temperature backoff to reduce model incoherence, and it performed fairly well. As shown in Tab. 7 (bottom), this method slightly improves performance across each model, resulting in an average error reduction of $5.78\%$ in $\mathrm{CAPTURE}^{\mathrm{real}}$ and $5.45\%$ in $\mathrm{CAPTURE}^{\mathrm{synthetic}}$ . Temperature backoff essentially allows the model to reattempt the question if it fails to respond to the prompt. Similar to previous results, positive results from reattempts highlight VLMs' weak reasoning abilities. + +Table 7. Comparison of models on CAPTURE across four scenarios (CAPTURE $^{\text{real}}$ vs. CAPTURE $^{\text{synthetic}}$ , Unoccluded vs. Occluded). 
"Original" indicates no backoff; "w/ backoff" indicates applying backoff, with $\Delta = (w/ backoff) - (Original)$ . Negative $\Delta$ values indicate an improvement. + +
ModelError (%) [↓]
All ObjectsOnly Occluded
GPT-4o14.7526.13 (+11.38)
InternVL232.9075.82 (+42.92)
Molmo32.4996.79 (+64.30)
Qwen2VL29.3332.89 (+3.56)
Avg. of 4 VLMs27.3757.91 (+30.54)
Table 8. VLM sMAPE for counting all objects and counting only the occluded objects in CAPTURE $^{\text{real}}$ . Metric: sMAPE (lower is better). + +# C.3. Models struggle at counting just occluded objects + +We separately test whether models can count only the occluded objects (not including the visible objects) in an image. Here, as Tab. 8 demonstrates, the models perform especially poorly in this task, with high error rates across all models. Therefore, we can conclude that occlusion and counting are uniquely difficult for the VLMs, and that the drop in performance between unoccluded and occluded settings in Tab. 2 is likely due to a poor ability to count occluded objects. + +# C.4. Models are overconfident in occluded settings + +We test the uncertainty with two different methods of obtaining confidence on Qwen2VL. In the first method, we prompt Qwen2VL for its confidence in the answer. For the second method, we generate 20 responses for every question in our VQA and calculate the confidence as the percentage of times the most common answer was generated. These results can be seen in Fig. 9 and Fig. 10 respectively. In both reliability curves, there is a slight trend that the model's confidence is negatively correlated with the error, which is the desired outcome. In $\mathrm{CAPTURE}^{\mathrm{real}}$, however, the correlation is much stronger. While the models are somewhat calibrated (with generally lower confidence on higher-error examples), there are still outliers in prompted confidence for CAPTURE $^{\text{real}}$ occluded and sampled confidence for CAPTURE $^{\text{synthetic}}$ occluded. This indicates that not only do the models perform worse under occlusion, but they can also be overconfident. + +![](images/c611c7dddf48809c8a6e0d57ebe548a151fb1cc3df349e754f8ed43ae455fa99.jpg) +Figure 9. Reliability curve of prompting model for confidence vs. sMAPE. + +![](images/12bebc6d15e6500bbabf290ea88f67842250f7147b933634b3e9da5cb0518d18.jpg) +Figure 10. 
Reliability curve of sampling model for confidence vs. sMAPE. + +# C.5. Models are biased to predict specific numbers. + +To examine where models frequently err, we generated a confusion matrix for every model based on CAPTURE $^{\text{synthetic}}$ results (shown in Appendix C.5). The y-axis represents the ground truth values and the x-axis represents the model's answers. We find that models often over-predict numbers associated with common counts in real life: GPT-4o tends to predict numbers like 8, 9, 10, and 12, which are all non-prime numbers (i.e. can be arranged into a grid) and common groupings of objects. For example, 12 is a common grouping (dozens) and allows arrangements into 3x4 or 2x6 grids. InternVL and Qwen2VL over-predict 5 and 10, aligning with how humans conceptualize numbers. Indeed, Coupland [12] found that numbers 5, 10, 20, and other round numbers appear disproportionately more in online texts. Molmo has no correlation with these factors, possibly due to its unique "point and count" ability. + +# D. VLM Prompts + +We use a 100-example validation set for each setting to select the best prompt, which we report below. + +# Prompt for GPT-4o on CAPTURE $^{\text{real}}$ unoccluded split. + +Count the exact number of [object] in the image. Assume the pattern of [object] continues behind any black box. Provide the total number of [object] as if the black box were not there. + +# Prompt for InternVL2 on CAPTURE $^{\text{real}}$ unoccluded split. + +Your task is to count objects in the image. First, state what the pattern is, then give your final count. + +# Prompt for Molmo on CAPTURE $^{\text{real}}$ unoccluded split. + +Count the exact number of [object] in the image. Only count [object] that are visible within the frame. If [object] are partially in the frame (i.e. if any part of [object] are visible), count it. + +# Prompt for Qwen2VL on CAPTURE $^{\text{real}}$ unoccluded split. + +Count the exact number of [object] in the image. 
Assume the pattern of [object] continues behind any black box. Provide the total number of [object] as if the black box were not there. Only count [object] that are visible within the frame (or would be visible without the occluding box). If [object] are partially in the frame (i.e. if any part of [object] are visible), count it. If the [object] would be partially in the frame without the occluding box, count it. + +![](images/08f6c9e3444d44c81dce730c5794b8ed561370ede8d77fbdb08e1d8da80f2eaa.jpg) + +![](images/bc42a00ebe464d45c0fda199fa3866d224671941005433a1ffed10ada4f7fc65.jpg) + +![](images/b764ec1f7d7b2599e57b7f0c6a90c64e02b3a7d1446773a3ab1bb37a3fede70d.jpg) +Figure 11. Confusion matrix: predicted vs. ground truth counts for CAPTURE $^{\text{real}}$ occluded split. + +![](images/6bbc087c1a75124b22630e02712bd7e1f1c4ff2a89de05e3a88dda5c6c31f81a.jpg) + +# Prompt for GPT-4o, InternVL2, and Qwen2VL on CAPTURE $^{\text{real}}$ occluded split. + +Count the exact number of [object] in the image. Assume the pattern of [object] continues behind any black box. Provide the total number of [object] as if the black box were not there. Only count [object] that are visible within the frame (or would be visible without the occluding box). If [object] are partially in the frame (i.e. if any part of [object] are visible), count it. If the [object] would be partially in the frame without the occluding box, count it. Molmo: Your task is to count objects in the image. Assume the pattern of [object] continues behind the black box. First, state what the pattern is, then give your final count. + +# Prompt for Molmo on CAPTURE $^{\text{real}}$ occluded split. + +Your task is to count objects in the image. Assume the pattern of [object] continues behind the black box. First, state what the pattern is, then give your final count. + +# Prompt for GPT-4o on CAPTUREsynthetic unoccluded split. + +Your task is to count objects in the image. 
First, state what the pattern is, then give your final count. + +# Prompt for InternVL2 on CAPTUREsynthetic unoccluded split. + +Count the exact number of [dot shape]s in the image. Only count [dot shape]s that are visible within the frame. If [dot shape]s are partially in the frame (i.e. if any part of [dot shape]s are visible), count it. + +# Prompt for Molmo on CAPTUREsynthetic unoccluded split. + +Count the exact number of [dot shape]s in the image. Only count [dot shape]s that are visible within the frame. + +# Prompt for Qwen2VL on CAPTUREsynthetic unoccluded split. + +Count the exact number of [dot shape]s in the image. Assume the pattern of [dot shape]s continues behind any black box. Provide the total number of [dot shape]s as if the black box were not there. Only count [dot shape]s that are visible within the frame (or would be visible without the occluding box). If [dot shape]s are partially in the frame (i.e. if any part of [dot shape]s are visible), count it. If the [dot shape]s would be partially in the frame without the occluding box, count it. + +# Prompt for GPT-4o and Molmo on CAPTUREsynthetic occluded split. + +Your task is to count objects in the image. Assume the pattern of [dot shape]s continues behind the black box. First, state what the pattern is, then give your final count. + +# Prompt for InternVL2 and Qwen2VL on CAPTUREsynthetic occluded split. + +Count the exact number of [dot shape]s in the image. Assume the pattern of [dot shape]s continues behind any black box. Provide the total number of [dot shape]s as if the black box were not there. Only count [dot shape]s that are visible within the frame (or would be visible without the occluding box). If [dot shape]s are partially in the frame (i.e. if any part of [dot shape]s are visible), count it. If the [dot shape]s would be partially in the frame without the occluding box, count it. 
\ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15485/images/0320e46c96b593a455fec0826baa3f421db6d2c4a95077806655be22fb113a09.jpg b/data/2025/2504_15xxx/2504.15485/images/0320e46c96b593a455fec0826baa3f421db6d2c4a95077806655be22fb113a09.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b1e7e1fa6c7c4a69b285ff8eaf54a6d987ee4050 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/0320e46c96b593a455fec0826baa3f421db6d2c4a95077806655be22fb113a09.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f12a42b169c912344b0a9e43737a05f9d91c82ad00ed08ec996102d7e608b40 +size 9020 diff --git a/data/2025/2504_15xxx/2504.15485/images/037e79bd08c36fe6b80824bcb0caf4643a0ad47db6c01c322d5a57de9b716faa.jpg b/data/2025/2504_15xxx/2504.15485/images/037e79bd08c36fe6b80824bcb0caf4643a0ad47db6c01c322d5a57de9b716faa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a2830c66d67105d5dee4943bfbdc16c6695992a --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/037e79bd08c36fe6b80824bcb0caf4643a0ad47db6c01c322d5a57de9b716faa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2df292422c59ce881f453a4d8c5b0cf97234fdca3871c6a5e0f1de761c48750 +size 3225 diff --git a/data/2025/2504_15xxx/2504.15485/images/08f6c9e3444d44c81dce730c5794b8ed561370ede8d77fbdb08e1d8da80f2eaa.jpg b/data/2025/2504_15xxx/2504.15485/images/08f6c9e3444d44c81dce730c5794b8ed561370ede8d77fbdb08e1d8da80f2eaa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0518fe037bc71be1cc3ac95cbc064072821635f2 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/08f6c9e3444d44c81dce730c5794b8ed561370ede8d77fbdb08e1d8da80f2eaa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:005d29c3420a3abea338d320100e62b19f85ebc2046e8df75a4035d385e51b87 +size 52259 diff --git 
a/data/2025/2504_15xxx/2504.15485/images/0aacafaba92370dae1f0e5b5f4e96b3961bd610b4f253bcf291566faf27d3a59.jpg b/data/2025/2504_15xxx/2504.15485/images/0aacafaba92370dae1f0e5b5f4e96b3961bd610b4f253bcf291566faf27d3a59.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4e2c09d9d29711e9ac2b1ac5556ec3a4b1bc46b7 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/0aacafaba92370dae1f0e5b5f4e96b3961bd610b4f253bcf291566faf27d3a59.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:505b5c017954e4463effceee5ecb5a8d0e44c35e4ea77c05aa526318e4db1f20 +size 24267 diff --git a/data/2025/2504_15xxx/2504.15485/images/12bebc6d15e6500bbabf290ea88f67842250f7147b933634b3e9da5cb0518d18.jpg b/data/2025/2504_15xxx/2504.15485/images/12bebc6d15e6500bbabf290ea88f67842250f7147b933634b3e9da5cb0518d18.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4dfd390a91c5d60bc7d4e8648f39464a7802cb3f --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/12bebc6d15e6500bbabf290ea88f67842250f7147b933634b3e9da5cb0518d18.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be07154681ab9342c4e58de6eedeb82d9fe3b0e4a26bd381077aa50603eaf24c +size 22688 diff --git a/data/2025/2504_15xxx/2504.15485/images/1bbc7a1297efec4901bf89c6b7bcce563982d0c367188c0e42f7ec57805c29ee.jpg b/data/2025/2504_15xxx/2504.15485/images/1bbc7a1297efec4901bf89c6b7bcce563982d0c367188c0e42f7ec57805c29ee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4b2f4fc1b18505fd1c5b5da8e50d641427b33033 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/1bbc7a1297efec4901bf89c6b7bcce563982d0c367188c0e42f7ec57805c29ee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5940efa097130bcb79383ffef34787a29b87ea94dbe01c7017ee8c8037cee012 +size 15388 diff --git a/data/2025/2504_15xxx/2504.15485/images/219a1166134d1772e966ee56c5d2d9ed4a6b2d6e102dc7a24aa55784ec37d983.jpg 
b/data/2025/2504_15xxx/2504.15485/images/219a1166134d1772e966ee56c5d2d9ed4a6b2d6e102dc7a24aa55784ec37d983.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e8af2a4fca3347f266e2787daaf9084fc0eea5e4 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/219a1166134d1772e966ee56c5d2d9ed4a6b2d6e102dc7a24aa55784ec37d983.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61cd88af85681b2276f2500e70405e0c787f11ec033ee18cbc9543390c5735b3 +size 28586 diff --git a/data/2025/2504_15xxx/2504.15485/images/4518a5465c290528c213f388056a3af4202975be799d21131dcfbeb4f70ff0f6.jpg b/data/2025/2504_15xxx/2504.15485/images/4518a5465c290528c213f388056a3af4202975be799d21131dcfbeb4f70ff0f6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d2822b837b1e32a72075fa4ac4e4614cea1b432b --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/4518a5465c290528c213f388056a3af4202975be799d21131dcfbeb4f70ff0f6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b61f935df65b7f93a5128172393d4c072c666402473421e03d3c6002a482a4eb +size 59769 diff --git a/data/2025/2504_15xxx/2504.15485/images/4dfd3d68fad466d166038fb0040b03ca482ef5b70451ed89cecd58cce73374cb.jpg b/data/2025/2504_15xxx/2504.15485/images/4dfd3d68fad466d166038fb0040b03ca482ef5b70451ed89cecd58cce73374cb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..81a7d07448de64376cb68b4859bd049d8ef1215d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/4dfd3d68fad466d166038fb0040b03ca482ef5b70451ed89cecd58cce73374cb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1dcc57fdbca1ceb130283f875b59a49a8bd39507b5a4fe36f7640bd0eb50929b +size 27692 diff --git a/data/2025/2504_15xxx/2504.15485/images/54a9fb14060fcc4940d0a1d77d308d09d15406b37a1ca1943cd3e02ab33800aa.jpg b/data/2025/2504_15xxx/2504.15485/images/54a9fb14060fcc4940d0a1d77d308d09d15406b37a1ca1943cd3e02ab33800aa.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..9b874a6b2f4310a051cf9c5e732a2432a287e92d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/54a9fb14060fcc4940d0a1d77d308d09d15406b37a1ca1943cd3e02ab33800aa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0700f63083e2bbab5e905ffff9321b903b775d4c29077d6beecce5929043bea +size 3568 diff --git a/data/2025/2504_15xxx/2504.15485/images/6852ecb13e232280415a379065d7456c9457cca7da26b7d7923a10788f9ddee8.jpg b/data/2025/2504_15xxx/2504.15485/images/6852ecb13e232280415a379065d7456c9457cca7da26b7d7923a10788f9ddee8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..51428a8c323b8915cfcf52cf12af82e245500ab3 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/6852ecb13e232280415a379065d7456c9457cca7da26b7d7923a10788f9ddee8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39bc15aca83ebee48c3a8adda89e156b62f3c06c20846c61c96f0ce4e7968248 +size 18999 diff --git a/data/2025/2504_15xxx/2504.15485/images/6bbc087c1a75124b22630e02712bd7e1f1c4ff2a89de05e3a88dda5c6c31f81a.jpg b/data/2025/2504_15xxx/2504.15485/images/6bbc087c1a75124b22630e02712bd7e1f1c4ff2a89de05e3a88dda5c6c31f81a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9cd931914ff1b598cb81df461e9c1451ac70261f --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/6bbc087c1a75124b22630e02712bd7e1f1c4ff2a89de05e3a88dda5c6c31f81a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1e6fec4c1518e9081926ca0a3da3def2a833ef37fb431b63e4e47aa4b216f63 +size 51869 diff --git a/data/2025/2504_15xxx/2504.15485/images/6dcb61ccb0400b9be87716c1ec763a1aee18b1ea03c0a515f3207f2846c30ff6.jpg b/data/2025/2504_15xxx/2504.15485/images/6dcb61ccb0400b9be87716c1ec763a1aee18b1ea03c0a515f3207f2846c30ff6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2e6e4b6ef59db3f4427ab3e8de1b44fb15b33ac0 --- /dev/null +++ 
b/data/2025/2504_15xxx/2504.15485/images/6dcb61ccb0400b9be87716c1ec763a1aee18b1ea03c0a515f3207f2846c30ff6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3979502e72c4f2e5831986bcfa1afa6b5e6d1de1ba40f63d46ed67d4f7da9753 +size 21299 diff --git a/data/2025/2504_15xxx/2504.15485/images/765e128e90cd1a75b722f72d292b40dfaeab439abf0e058ca783f9444768b377.jpg b/data/2025/2504_15xxx/2504.15485/images/765e128e90cd1a75b722f72d292b40dfaeab439abf0e058ca783f9444768b377.jpg new file mode 100644 index 0000000000000000000000000000000000000000..105e14fb9d41dd282d89a9d1a350a29e1cf1194e --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/765e128e90cd1a75b722f72d292b40dfaeab439abf0e058ca783f9444768b377.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e4e9c5cc6e66a1d10365ff0684a8fa6b723d08888d509e89210adb54dff0102 +size 6809 diff --git a/data/2025/2504_15xxx/2504.15485/images/766d3a6289f8b6fdb33abb8032609a92274b1bc67ea432f698f4bdcbf6a1cf2d.jpg b/data/2025/2504_15xxx/2504.15485/images/766d3a6289f8b6fdb33abb8032609a92274b1bc67ea432f698f4bdcbf6a1cf2d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..328808f09fc79f7d5bc6596808c2230e02284e47 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/766d3a6289f8b6fdb33abb8032609a92274b1bc67ea432f698f4bdcbf6a1cf2d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7584b4b237638c5ff2782442d535040cbe1ed7cd5708fb7c26cc4e2c730be6d +size 49434 diff --git a/data/2025/2504_15xxx/2504.15485/images/7ad2418d050b057c9166898787a612277b8a7a6e4806fefeed67729879f8a3f8.jpg b/data/2025/2504_15xxx/2504.15485/images/7ad2418d050b057c9166898787a612277b8a7a6e4806fefeed67729879f8a3f8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d61bb56231bb2df3a4e4825e181e9a4c9786b4d7 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/7ad2418d050b057c9166898787a612277b8a7a6e4806fefeed67729879f8a3f8.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:bcc5f2e98961920618ec51654ed2844c338a1b41805266474255bc915ee2d051 +size 90306 diff --git a/data/2025/2504_15xxx/2504.15485/images/8414fceefb0333f2f72e9353299e00198f69eb76a39b1a315c06104d0376da7a.jpg b/data/2025/2504_15xxx/2504.15485/images/8414fceefb0333f2f72e9353299e00198f69eb76a39b1a315c06104d0376da7a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..66a8b926c99c40f6a762d6035477f1164dfc977f --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/8414fceefb0333f2f72e9353299e00198f69eb76a39b1a315c06104d0376da7a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b45baa284e0947dd894add3e9dd1ef4b1de20c4dee40d1e8f44c39da5723e5b4 +size 16628 diff --git a/data/2025/2504_15xxx/2504.15485/images/8578bf7e10fea3496bee51ad25b496e2829784d6fda6ca668364777004884e8c.jpg b/data/2025/2504_15xxx/2504.15485/images/8578bf7e10fea3496bee51ad25b496e2829784d6fda6ca668364777004884e8c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e6ece29a1221d98e01152386b9f42d7d56994f20 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/8578bf7e10fea3496bee51ad25b496e2829784d6fda6ca668364777004884e8c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98c4b8eef83d4d2dfffa52656ad8717734fffed9af283ad0f978894602ee0f25 +size 15438 diff --git a/data/2025/2504_15xxx/2504.15485/images/9049c59347241b0b3b291a226e884daeba5d88430388678bdb27fa8f3cd22b6c.jpg b/data/2025/2504_15xxx/2504.15485/images/9049c59347241b0b3b291a226e884daeba5d88430388678bdb27fa8f3cd22b6c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fec67d11906050eb731176b1009e289f2341e68e --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/9049c59347241b0b3b291a226e884daeba5d88430388678bdb27fa8f3cd22b6c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:024acf908245a362951119d5307326c06a8580bc84b9b8fe5331e41b85df8396 +size 3053 diff --git 
a/data/2025/2504_15xxx/2504.15485/images/981b3b6aabc89ab61a54e82e52bc90dd504b1f841da1c843d3996fb961f096a9.jpg b/data/2025/2504_15xxx/2504.15485/images/981b3b6aabc89ab61a54e82e52bc90dd504b1f841da1c843d3996fb961f096a9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bffb10bc9fe18ab5b24447d666af287b713b9904 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/981b3b6aabc89ab61a54e82e52bc90dd504b1f841da1c843d3996fb961f096a9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e2cdcfb1bdfa11cc42f072e453dcfca55006556abe4aca3e7757095582cd44d +size 14995 diff --git a/data/2025/2504_15xxx/2504.15485/images/a0b1d18c5608668c32ad94cfb339eefcb95710ae93d724ddeebaa909da2aae77.jpg b/data/2025/2504_15xxx/2504.15485/images/a0b1d18c5608668c32ad94cfb339eefcb95710ae93d724ddeebaa909da2aae77.jpg new file mode 100644 index 0000000000000000000000000000000000000000..58a763c1cf44f0fac86c6421d110b29d34a67e3c --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/a0b1d18c5608668c32ad94cfb339eefcb95710ae93d724ddeebaa909da2aae77.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7cc604c258b0842c27b8c75a7bd5bb3bc0a5b2ae388cd42c25285a63d0bcf416 +size 32708 diff --git a/data/2025/2504_15xxx/2504.15485/images/b2289750d0a6af17eb500803ad640c7b0e5d851d29c011084ed404545dd9bf6b.jpg b/data/2025/2504_15xxx/2504.15485/images/b2289750d0a6af17eb500803ad640c7b0e5d851d29c011084ed404545dd9bf6b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c78df8657fa5acf281de6f4a62575a4628d0e714 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/b2289750d0a6af17eb500803ad640c7b0e5d851d29c011084ed404545dd9bf6b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12597d7e9fa43bda5628631a9935e3317b0731e289f581f93bc8c834758cdae1 +size 29871 diff --git a/data/2025/2504_15xxx/2504.15485/images/b5bdfdd2908acbf8cdef268b0abf95ce0fe53e686332d8e190da4bf0e24212c7.jpg 
b/data/2025/2504_15xxx/2504.15485/images/b5bdfdd2908acbf8cdef268b0abf95ce0fe53e686332d8e190da4bf0e24212c7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..653b17e1e626d66fce60c0501814dbca2249c5b7 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/b5bdfdd2908acbf8cdef268b0abf95ce0fe53e686332d8e190da4bf0e24212c7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8f5f319346f0a05c57c54e4f015403df29f8c9b0dc7fdd2de8206c1208c1c20 +size 12498 diff --git a/data/2025/2504_15xxx/2504.15485/images/b764ec1f7d7b2599e57b7f0c6a90c64e02b3a7d1446773a3ab1bb37a3fede70d.jpg b/data/2025/2504_15xxx/2504.15485/images/b764ec1f7d7b2599e57b7f0c6a90c64e02b3a7d1446773a3ab1bb37a3fede70d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..621c8f93f066fcc2e5c4730f43edc8d8f635700c --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/b764ec1f7d7b2599e57b7f0c6a90c64e02b3a7d1446773a3ab1bb37a3fede70d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6409428b656bc1cac873d69de9e1346ea8960c12119d21619ab4f902bbf6c73a +size 48361 diff --git a/data/2025/2504_15xxx/2504.15485/images/b7f8ce3847d0a7e3146846c8edff7e0d3871512192406b9fb9b188c9cf8bf053.jpg b/data/2025/2504_15xxx/2504.15485/images/b7f8ce3847d0a7e3146846c8edff7e0d3871512192406b9fb9b188c9cf8bf053.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc3514a714b701e617937d5534be76dbd4c24e39 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/b7f8ce3847d0a7e3146846c8edff7e0d3871512192406b9fb9b188c9cf8bf053.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8df02c81bced5fd5ad8686ff940adeb47fda547a3d045bf2edb017711dcebf52 +size 11568 diff --git a/data/2025/2504_15xxx/2504.15485/images/bc42a00ebe464d45c0fda199fa3866d224671941005433a1ffed10ada4f7fc65.jpg b/data/2025/2504_15xxx/2504.15485/images/bc42a00ebe464d45c0fda199fa3866d224671941005433a1ffed10ada4f7fc65.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..6dfdc419fe3003defcbd2383a39f3b8c37d9d24f --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/bc42a00ebe464d45c0fda199fa3866d224671941005433a1ffed10ada4f7fc65.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9cc67ccb5aebd6139ac7986a8c3b3bf408e32b662eff2ec22455a42f1c1e79c8 +size 50526 diff --git a/data/2025/2504_15xxx/2504.15485/images/c611c7dddf48809c8a6e0d57ebe548a151fb1cc3df349e754f8ed43ae455fa99.jpg b/data/2025/2504_15xxx/2504.15485/images/c611c7dddf48809c8a6e0d57ebe548a151fb1cc3df349e754f8ed43ae455fa99.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ccdfcc74d13551a179c919e141328f6becbe802b --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/c611c7dddf48809c8a6e0d57ebe548a151fb1cc3df349e754f8ed43ae455fa99.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd41a0dec008de9a07e846d9b2295a42ecfc905420849029054f6f6536b3bd88 +size 22692 diff --git a/data/2025/2504_15xxx/2504.15485/images/c9dcd23e293902294ad0bb304302126a7e2ee754cdd92911fd7e5aabcfc11d68.jpg b/data/2025/2504_15xxx/2504.15485/images/c9dcd23e293902294ad0bb304302126a7e2ee754cdd92911fd7e5aabcfc11d68.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ce5d90c2e74cabf8f59e20b11a5dccd1a46a851d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/c9dcd23e293902294ad0bb304302126a7e2ee754cdd92911fd7e5aabcfc11d68.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6bb9f66dca7812cd22241298fefcd7e0aba5b1a9f4a621db3f43df4c030eb4c +size 14502 diff --git a/data/2025/2504_15xxx/2504.15485/images/ce5800214bd41b55746d8d1c7787aaa4f4fc6e766570fdd0f051c236d58c6c8e.jpg b/data/2025/2504_15xxx/2504.15485/images/ce5800214bd41b55746d8d1c7787aaa4f4fc6e766570fdd0f051c236d58c6c8e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b55cf4869b3d256747117dda8d744d99c2b08f3 --- /dev/null +++ 
b/data/2025/2504_15xxx/2504.15485/images/ce5800214bd41b55746d8d1c7787aaa4f4fc6e766570fdd0f051c236d58c6c8e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa9074f27f47991e2489dfa491ec2ca1c77beebf91091af598f3d73908a02ba8 +size 29036 diff --git a/data/2025/2504_15xxx/2504.15485/images/d1ff88f8e06685dc0b485224cacfa612ed34e24c8db13ff474eb31b034de7999.jpg b/data/2025/2504_15xxx/2504.15485/images/d1ff88f8e06685dc0b485224cacfa612ed34e24c8db13ff474eb31b034de7999.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1a5697ec366aec2d2761565d658eba2072066586 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/d1ff88f8e06685dc0b485224cacfa612ed34e24c8db13ff474eb31b034de7999.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8bf7cd43f50b3c7252cd4c507b2f3ddcc117897ab1f9dca512d556d7dfa54733 +size 22903 diff --git a/data/2025/2504_15xxx/2504.15485/images/d862b1fc3435e11a6f995250f084863846b2a1867e3e95f87cda3d508c3650df.jpg b/data/2025/2504_15xxx/2504.15485/images/d862b1fc3435e11a6f995250f084863846b2a1867e3e95f87cda3d508c3650df.jpg new file mode 100644 index 0000000000000000000000000000000000000000..548315cc91cd7294ebac9b4dae64f7ffbdc831a7 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/d862b1fc3435e11a6f995250f084863846b2a1867e3e95f87cda3d508c3650df.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d8a598d9a55ee3d174021862996c646f95788c7363644898f0bfde662fa5383c +size 16113 diff --git a/data/2025/2504_15xxx/2504.15485/images/f88aa8f349d24311721ebe76c11f179973d4ac74f16fc9cbc88fd681cbc49a5d.jpg b/data/2025/2504_15xxx/2504.15485/images/f88aa8f349d24311721ebe76c11f179973d4ac74f16fc9cbc88fd681cbc49a5d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..907dde12b50699d311ee22ba3c0f6c75bf88cca4 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/f88aa8f349d24311721ebe76c11f179973d4ac74f16fc9cbc88fd681cbc49a5d.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:8e322fcd64273e36463ffcb0ccc7a687ddf43a5d010fec32b5416a3110cddedf +size 6394 diff --git a/data/2025/2504_15xxx/2504.15485/images/fdce1f292eb8c63e5d8886496c488350aad21c286ba6461c0abc5dfe7d1ccd31.jpg b/data/2025/2504_15xxx/2504.15485/images/fdce1f292eb8c63e5d8886496c488350aad21c286ba6461c0abc5dfe7d1ccd31.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c80c915f2a15f4e0b2bcf062275130948e77b7b5 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/fdce1f292eb8c63e5d8886496c488350aad21c286ba6461c0abc5dfe7d1ccd31.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af7d68babe004357b982bf1102ad0d63bd9c8dad7df2aab58d6b96be69f04710 +size 17894 diff --git a/data/2025/2504_15xxx/2504.15485/images/fe55be050932617254da6354bbc993dec70850b06690d96aa4514239ea7aaef4.jpg b/data/2025/2504_15xxx/2504.15485/images/fe55be050932617254da6354bbc993dec70850b06690d96aa4514239ea7aaef4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ef2216789e07c9f081c6165a0b2847aac2b80ca1 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/images/fe55be050932617254da6354bbc993dec70850b06690d96aa4514239ea7aaef4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d3034cbd894ae9d4c88a6919deca7949a589b732f0c1c3d1e6baf30a29cd8d1 +size 6405 diff --git a/data/2025/2504_15xxx/2504.15485/layout.json b/data/2025/2504_15xxx/2504.15485/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..82f8ad60cad8d0b297f8ed1f16c0ac6c40dfc7a6 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15485/layout.json @@ -0,0 +1,11652 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 88, + 102, + 523, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 102, + 523, + 140 + ], + "spans": [ + { + "bbox": [ + 88, + 102, + 523, + 140 + ], + "type": "text", + "content": "CAPTURE: Evaluating Spatial Reasoning in Vision Language Models via 
Occluded Object Counting" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 124, + 161, + 189, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 161, + 189, + 175 + ], + "spans": [ + { + "bbox": [ + 124, + 161, + 189, + 175 + ], + "type": "text", + "content": "Atin Pothiraj" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 214, + 161, + 310, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 161, + 310, + 175 + ], + "spans": [ + { + "bbox": [ + 214, + 161, + 310, + 175 + ], + "type": "text", + "content": "Elias Stengel-Eskin" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 335, + 162, + 393, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 162, + 393, + 175 + ], + "spans": [ + { + "bbox": [ + 335, + 162, + 393, + 175 + ], + "type": "text", + "content": "Jaemin Cho" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 418, + 162, + 485, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 418, + 162, + 485, + 174 + ], + "spans": [ + { + "bbox": [ + 418, + 162, + 485, + 174 + ], + "type": "text", + "content": "Mohit Bansal" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 262, + 176, + 348, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 262, + 176, + 348, + 190 + ], + "spans": [ + { + "bbox": [ + 262, + 176, + 348, + 190 + ], + "type": "text", + "content": "UNC Chapel Hill" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 198, + 191, + 430, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 191, + 430, + 203 + ], + "spans": [ + { + "bbox": [ + 198, + 191, + 430, + 203 + ], + "type": "text", + "content": "{atin, esteng, jmincho, mbansal}@cs.unc.edu" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 152, + 231, + 200, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 231, + 200, + 243 + ], + "spans": [ + { + "bbox": [ + 152, + 231, + 200, + 243 + 
], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 256, + 296, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 256, + 296, + 639 + ], + "spans": [ + { + "bbox": [ + 53, + 256, + 296, + 639 + ], + "type": "text", + "content": "Recognizing and reasoning about occluded (partially or fully hidden) objects is vital to understanding visual scenes, as occlusions frequently occur in real-world environments and act as obstacles for spatial comprehension. To test models' ability to reason about multiple occluded objects, we introduce a novel task, Counting Amodally for Patterns Through Unseen REgions (CAPTURE), which requires a model to count objects arranged in a pattern by inferring how the pattern continues behind an occluder (an object which blocks parts of the scene). CAPTURE requires both recognizing visual patterns and reasoning, making it a useful testbed for evaluating vision-language models (VLMs) on whether they understand occluded patterns and possess spatial understanding skills. By requiring models to reason about occluded objects, CAPTURE also tests VLMs' ability to form world models that would allow them to fill in missing information. CAPTURE consists of two parts: (1) CAPTURE" + }, + { + "bbox": [ + 53, + 256, + 296, + 639 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 53, + 256, + 296, + 639 + ], + "type": "text", + "content": ", with manually filtered images of real objects in patterns and (2) CAPTURE" + }, + { + "bbox": [ + 53, + 256, + 296, + 639 + ], + "type": "inline_equation", + "content": "^{\\text{synthetic}}" + }, + { + "bbox": [ + 53, + 256, + 296, + 639 + ], + "type": "text", + "content": ", a controlled diagnostic with generated patterned images. We evaluate four strong VLMs (GPT-4o, Intern-VL2, Molmo, and Qwen2-VL) on CAPTURE, finding that models struggle to count on both occluded and unoccluded patterns. 
Crucially, we find that models perform worse with occlusion, suggesting that VLMs are also deficient in inferring unseen spatial relationships: even the strongest VLMs like GPT-4o fail to count with occlusion. In contrast, we find that humans achieve very little error on CAPTURE. We also find that providing auxiliary information of occluded object locations increases performance, underscoring that the model error comes both from an inability to handle occlusion as well as difficulty in counting in images." + }, + { + "bbox": [ + 53, + 256, + 296, + 639 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 654, + 135, + 666 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 654, + 135, + 666 + ], + "spans": [ + { + "bbox": [ + 56, + 654, + 135, + 666 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 671, + 295, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 671, + 295, + 696 + ], + "spans": [ + { + "bbox": [ + 55, + 671, + 295, + 696 + ], + "type": "text", + "content": "Inferring what lies behind different objects in occluded scenes is crucial for human perception, as it allows us to" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 316, + 230, + 424, + 344 + ], + "blocks": [ + { + "bbox": [ + 316, + 230, + 424, + 344 + ], + "lines": [ + { + "bbox": [ + 316, + 230, + 424, + 344 + ], + "spans": [ + { + "bbox": [ + 316, + 230, + 424, + 344 + ], + "type": "image", + "image_path": "981b3b6aabc89ab61a54e82e52bc90dd504b1f841da1c843d3996fb961f096a9.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 320, + 346, + 419, + 380 + ], + "lines": [ + { + "bbox": [ + 320, + 346, + 419, + 380 + ], + "spans": [ + { + "bbox": [ + 320, + 346, + 419, + 380 + ], + "type": "text", + "content": "Instruction: Count the exact number of cups in 
the image, assuming the pattern continues behind the black box." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 424, + 230, + 539, + 383 + ], + "blocks": [ + { + "bbox": [ + 424, + 230, + 539, + 383 + ], + "lines": [ + { + "bbox": [ + 424, + 230, + 539, + 383 + ], + "spans": [ + { + "bbox": [ + 424, + 230, + 539, + 383 + ], + "type": "image", + "image_path": "219a1166134d1772e966ee56c5d2d9ed4a6b2d6e102dc7a24aa55784ec37d983.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 392, + 555, + 437 + ], + "lines": [ + { + "bbox": [ + 313, + 392, + 555, + 437 + ], + "spans": [ + { + "bbox": [ + 313, + 392, + 555, + 437 + ], + "type": "text", + "content": "Figure 1. CAPTURE example with an output from GPT-4o. While people can easily infer the missing number of cups and correctly reason over occluded patterns, models generally struggle to reason over these occluded scenes." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 449, + 555, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 449, + 555, + 653 + ], + "spans": [ + { + "bbox": [ + 313, + 449, + 555, + 653 + ], + "type": "text", + "content": "maintain a coherent understanding of our environment even when parts are hidden. The human visual system accomplishes this by integrating past experiences, context, and sensory inputs to reconstruct incomplete scenes [19, 27, 30, 45]. Meanwhile, recent advancements in vision-language models (VLMs) – especially in terms of visual and spatial reasoning – raise the question of whether these systems can perform similar inferential tasks. 
One way of measuring such capabilities is through amodal completion – the task of inferring the invisible parts of partially occluded objects; here, vision-only models are typically evaluated via dense prediction tasks like object segmentation and image inpainting [5]. However, this format is not well-suited for assessing VLMs, whose outputs consist of text tokens rather than pixel-level predictions. This raises a critical question: How can we quantify the ability of VLMs to form spatial world modeling [17] in the presence of occlusion?" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 653, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 653, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 653, + 556, + 715 + ], + "type": "text", + "content": "To address this, we introduce CAPTURE, Counting Amodally for Patterns Through Unseen REgions, a novel benchmark that tests a VLM's world modeling and spatial reasoning abilities through the task of amodal counting, where models are prompted to count occluded objects" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 217, + 37, + 572 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 217, + 37, + 572 + ], + "spans": [ + { + "bbox": [ + 14, + 217, + 37, + 572 + ], + "type": "text", + "content": "arXiv:2504.15485v2 [cs.CV] 13 Aug 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 702, + 258, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 702, + 258, + 712 + ], + "spans": [ + { + "bbox": [ + 66, + 702, + 258, + 712 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 66, + 702, + 258, + 712 + ], + "type": "text", + "content": "Code and data: https://github.com/atinpothiraj/CAPTURE" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 262 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 262 + ], + "type": "text", + "content": "by amodally completing a pattern. CAPTURE focuses on counting as it provides an objective and easy-to-verify output by comparing predicted counts with ground truth values. Moreover, patterned objects appear in various real-world domains, especially in man-made environments like parking lots, cities, and warehouses, where counting objects is often required. Fig. 1 illustrates the CAPTURE task. We show a VLM an image where objects are placed in a regular pattern (e.g., a 4x4 grid) with some objects occluded, and ask the model to count the total number of objects in the image assuming that the pattern continues behind the occlusion. The task requires handling occlusion, pattern recognition, and counting skills that exist in humans from a fairly young age [27, 30, 45], thus humans can easily answer such questions – indeed, we find that people can complete CAPTURE tasks with almost no error." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 266, + 294, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 266, + 294, + 410 + ], + "spans": [ + { + "bbox": [ + 55, + 266, + 294, + 410 + ], + "type": "text", + "content": "CAPTURE consists of two subsets: " + }, + { + "bbox": [ + 55, + 266, + 294, + 410 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{real}}" + }, + { + "bbox": [ + 55, + 266, + 294, + 410 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 266, + 294, + 410 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{synthetic}}" + }, + { + "bbox": [ + 55, + 266, + 294, + 410 + ], + "type": "text", + "content": ". As shown in Fig. 2, " + }, + { + "bbox": [ + 55, + 266, + 294, + 410 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{real}}" + }, + { + "bbox": [ + 55, + 266, + 294, + 410 + ], + "type": "text", + "content": " contains real-world images and tests the ability of models to perform amodal counting in naturalistic contexts, while " + }, + { + "bbox": [ + 55, + 266, + 294, + 410 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{synthetic}}" + }, + { + "bbox": [ + 55, + 266, + 294, + 410 + ], + "type": "text", + "content": " allows us to analyze specific factors by controlling different variables like color, shape, and number of objects. All images in " + }, + { + "bbox": [ + 55, + 266, + 294, + 410 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}" + }, + { + "bbox": [ + 55, + 266, + 294, + 410 + ], + "type": "text", + "content": " contain a pattern of objects and a manually annotated occluding black box covering some objects. 
" + }, + { + "bbox": [ + 55, + 266, + 294, + 410 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{real}}" + }, + { + "bbox": [ + 55, + 266, + 294, + 410 + ], + "type": "text", + "content": " contains 924 images with a diverse range of settings and objects, covering 92 different object types, while " + }, + { + "bbox": [ + 55, + 266, + 294, + 410 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{synthetic}}" + }, + { + "bbox": [ + 55, + 266, + 294, + 410 + ], + "type": "text", + "content": " contains 1250 images across multiple attribute classes." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 411, + 295, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 411, + 295, + 616 + ], + "spans": [ + { + "bbox": [ + 55, + 411, + 295, + 616 + ], + "type": "text", + "content": "By combining vision encoders with large language models (LLMs), VLMs have the potential to reason in a zero-shot way about visual inputs. To put this ability to the test and measure VLMs' ability to reason about missing visual information, we evaluate four strong recent VLMs (GPT40, InternVL2, Molmo, and Qwen2VL) on CAPTURE. Our experiment results (Sec. 4) show that models generally struggle with the multiple aspects of the task, with high error rates on both CAPTURE" + }, + { + "bbox": [ + 55, + 411, + 295, + 616 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 55, + 411, + 295, + 616 + ], + "type": "text", + "content": " and CAPTURE" + }, + { + "bbox": [ + 55, + 411, + 295, + 616 + ], + "type": "inline_equation", + "content": "^{\\text{synthetic}}" + }, + { + "bbox": [ + 55, + 411, + 295, + 616 + ], + "type": "text", + "content": " for occluded and unoccluded images. In contrast, we find that humans can perform the task easily: whereas model performance deteriorates as more objects in images are occluded, humans complete the task almost perfectly. 
We also compare VLMs to a vision-only model trained to count visible objects; while this model generally outperforms VLMs, its error is directly tied to the number of occluded objects – the more objects are occluded, the higher its error will be." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 618, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 618, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 618, + 295, + 714 + ], + "type": "text", + "content": "By objectively measuring VLMs' spatial reasoning capabilities under occlusion, CAPTURE highlights an unexpected weakness in VLMs. We analyze this weakness by providing the model with additional clues and information. Specifically, we test to what degree the VLMs' failure stems from an inability to integrate visual information by providing it with a text-based representation of the visible objects in the image in the form of object coordinates; here, VLMs" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 313, + 72, + 553, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 289 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 289 + ], + "type": "text", + "content": "perform substantially better, indicating that their poor performance on CAPTURE stems partly from an inability to count objects in images, rather than an inability to count more generally. Our findings align with previous work, which similarly finds that VLMs struggle to count in images [22, 33, 42]. We also test the degree to which VLM errors stem from an inability to form a world model by providing it with auxiliary information (the coordinates of the occluded objects in text, or inpainting the occluded regions). We find that VLMs perform substantially better with this auxiliary information, suggesting that VLMs are partly limited by their inability to imagine the missing visual information. 
Addressing these gaps is critical for VLMs to function effectively in real-world scenarios, where visual reasoning often involves occlusions – whether counting stadium seats, components on production lines, or buildings in neighborhoods. We hope that our work will foster future research on improving the world modeling capabilities of VLMs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 314, + 299, + 389, + 311 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 299, + 389, + 311 + ], + "spans": [ + { + "bbox": [ + 314, + 299, + 389, + 311 + ], + "type": "text", + "content": "2.CAPTURE" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 319, + 406, + 331 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 319, + 406, + 331 + ], + "spans": [ + { + "bbox": [ + 313, + 319, + 406, + 331 + ], + "type": "text", + "content": "2.1. Task Overview" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 337, + 553, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 337, + 553, + 624 + ], + "spans": [ + { + "bbox": [ + 313, + 337, + 553, + 624 + ], + "type": "text", + "content": "Input/output formulation. CAPTURE tests VLMs on occlusion reasoning, pattern recognition, and counting of both visible and occluded objects. VLMs already achieve high accuracy in classifying single, occluded objects [20]. Thus, we also argue that VLMs have the potential to perform well on CAPTURE's challenging task because their proficiency in handling occlusion ought to enable them to recognize occluded objects and reason accordingly. All images in CAPTURE contain a pattern. This makes the task solvable for models and people - if the objects were not placed in a pattern, it would be unreasonable to expect models to infer the position of the occluded objects. 
For example, given an image of a random pile of coins with a region occluded, it is not easy to infer whether the occluded region contains no coins or contains roughly the same amount as the rest of the pile. For this task, the patterns considered are all regular and fairly small, e.g. grids, circles, triangles, and other regular shapes - see Fig. 2 for further examples. The last step of CAPTURE is counting, asking the model to provide an objectively measurable output. In addition to VLMs, we also test COUNTGD [3], a state-of-the-art object detection-based counting method, finding that it fails to account for the occluded scenario, as its training entails solely predicting the visible, unoccluded objects in the image." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 631, + 553, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 631, + 553, + 656 + ], + "spans": [ + { + "bbox": [ + 313, + 631, + 553, + 656 + ], + "type": "text", + "content": "Metric. We use symmetric mean percent error (sMAPE) as the primary metric. 
sMAPE is given by:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 365, + 665, + 553, + 696 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 665, + 553, + 696 + ], + "spans": [ + { + "bbox": [ + 365, + 665, + 553, + 696 + ], + "type": "interline_equation", + "content": "\\mathrm {s M A P E} = 1 0 0 \\cdot \\frac {1}{n} \\sum_ {i = 1} ^ {n} \\frac {\\left| y _ {i} - \\hat {y} _ {i} \\right|}{\\left| y _ {i} \\right| + \\left| \\hat {y} _ {i} \\right|} \\tag {1}", + "image_path": "f88aa8f349d24311721ebe76c11f179973d4ac74f16fc9cbc88fd681cbc49a5d.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 702, + 553, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 702, + 553, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 702, + 553, + 714 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 702, + 553, + 714 + ], + "type": "inline_equation", + "content": "y_{i}" + }, + { + "bbox": [ + 313, + 702, + 553, + 714 + ], + "type": "text", + "content": " represents the actual values, " + }, + { + "bbox": [ + 313, + 702, + 553, + 714 + ], + "type": "inline_equation", + "content": "\\hat{y}_i" + }, + { + "bbox": [ + 313, + 702, + 553, + 714 + ], + "type": "text", + "content": " represents the pre" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 165, + 71, + 209, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 165, + 71, + 209, + 79 + ], + "spans": [ + { + "bbox": [ + 165, + 71, + 209, + 79 + ], + "type": "text", + "content": "CAPTUREreal" + } + ] + } + ], + 
"index": 0 + }, + { + "bbox": [ + 61, + 80, + 306, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 80, + 306, + 106 + ], + "spans": [ + { + "bbox": [ + 61, + 80, + 306, + 106 + ], + "type": "text", + "content": "Instruction: Count the exact number of [object] in the image. Assume the pattern of [object] continues behind any black box. Provide the total number of [object] as if the black box were not there." + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 59, + 108, + 137, + 169 + ], + "blocks": [ + { + "bbox": [ + 59, + 108, + 137, + 169 + ], + "lines": [ + { + "bbox": [ + 59, + 108, + 137, + 169 + ], + "spans": [ + { + "bbox": [ + 59, + 108, + 137, + 169 + ], + "type": "image", + "image_path": "0320e46c96b593a455fec0826baa3f421db6d2c4a95077806655be22fb113a09.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 59, + 171, + 137, + 215 + ], + "lines": [ + { + "bbox": [ + 59, + 171, + 137, + 215 + ], + "spans": [ + { + "bbox": [ + 59, + 171, + 137, + 215 + ], + "type": "text", + "content": "GPT-4o: 18 bottle caps visible...black box covers...5 caps...total estimated count...is 23. 
Ground truth: 19" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 139, + 108, + 241, + 170 + ], + "blocks": [ + { + "bbox": [ + 139, + 108, + 241, + 170 + ], + "lines": [ + { + "bbox": [ + 139, + 108, + 241, + 170 + ], + "spans": [ + { + "bbox": [ + 139, + 108, + 241, + 170 + ], + "type": "image", + "image_path": "b7f8ce3847d0a7e3146846c8edff7e0d3871512192406b9fb9b188c9cf8bf053.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 147, + 172, + 228, + 216 + ], + "lines": [ + { + "bbox": [ + 147, + 172, + 228, + 216 + ], + "spans": [ + { + "bbox": [ + 147, + 172, + 228, + 216 + ], + "type": "text", + "content": "GPT-4o: Bread rolls are organized in a 4x4 grid, which suggests there are 16 rolls in total. Ground truth: 20" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 242, + 171, + 313, + 223 + ], + "lines": [ + { + "bbox": [ + 242, + 171, + 313, + 223 + ], + "spans": [ + { + "bbox": [ + 242, + 171, + 313, + 223 + ], + "type": "text", + "content": "GPT-4o: Total nuts in each row...6 times 6 = 36. Nuts in the hidden " + }, + { + "bbox": [ + 242, + 171, + 313, + 223 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 242, + 171, + 313, + 223 + ], + "type": "text", + "content": " section = 9. 36 + 9 = 45 nuts. Ground truth: 42" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 110, + 240, + 499, + 253 + ], + "lines": [ + { + "bbox": [ + 110, + 240, + 499, + 253 + ], + "spans": [ + { + "bbox": [ + 110, + 240, + 499, + 253 + ], + "type": "text", + "content": "Figure 2. 
Example images with GPT-4o responses to CAPTURE" + }, + { + "bbox": [ + 110, + 240, + 499, + 253 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 110, + 240, + 499, + 253 + ], + "type": "text", + "content": " and CAPTURE" + }, + { + "bbox": [ + 110, + 240, + 499, + 253 + ], + "type": "inline_equation", + "content": "^{\\text{synthetic}}" + }, + { + "bbox": [ + 110, + 240, + 499, + 253 + ], + "type": "text", + "content": " occluded splits." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 245, + 108, + 309, + 170 + ], + "blocks": [ + { + "bbox": [ + 245, + 108, + 309, + 170 + ], + "lines": [ + { + "bbox": [ + 245, + 108, + 309, + 170 + ], + "spans": [ + { + "bbox": [ + 245, + 108, + 309, + 170 + ], + "type": "image", + "image_path": "fe55be050932617254da6354bbc993dec70850b06690d96aa4514239ea7aaef4.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 408, + 70, + 462, + 79 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 408, + 70, + 462, + 79 + ], + "spans": [ + { + "bbox": [ + 408, + 70, + 462, + 79 + ], + "type": "text", + "content": "CAPTUREsynthetic" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 314, + 80, + 541, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 80, + 541, + 107 + ], + "spans": [ + { + "bbox": [ + 314, + 80, + 541, + 107 + ], + "type": "text", + "content": "Instruction: Your task is to count objects in the image. Assume the pattern of [object] continues behind the black box. First, state what the pattern is, then give your final count." 
+ } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 320, + 110, + 388, + 169 + ], + "blocks": [ + { + "bbox": [ + 320, + 110, + 388, + 169 + ], + "lines": [ + { + "bbox": [ + 320, + 110, + 388, + 169 + ], + "spans": [ + { + "bbox": [ + 320, + 110, + 388, + 169 + ], + "type": "image", + "image_path": "54a9fb14060fcc4940d0a1d77d308d09d15406b37a1ca1943cd3e02ab33800aa.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 318, + 171, + 394, + 215 + ], + "lines": [ + { + "bbox": [ + 318, + 171, + 394, + 215 + ], + "spans": [ + { + "bbox": [ + 318, + 171, + 394, + 215 + ], + "type": "text", + "content": "GPT-4o: Pattern: A circular arrangement... Visible count: ...8 ... Total count: ...12 dots. Ground truth: 11" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 394, + 111, + 474, + 168 + ], + "blocks": [ + { + "bbox": [ + 394, + 111, + 474, + 168 + ], + "lines": [ + { + "bbox": [ + 394, + 111, + 474, + 168 + ], + "spans": [ + { + "bbox": [ + 394, + 111, + 474, + 168 + ], + "type": "image", + "image_path": "037e79bd08c36fe6b80824bcb0caf4643a0ad47db6c01c322d5a57de9b716faa.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 395, + 171, + 475, + 232 + ], + "lines": [ + { + "bbox": [ + 395, + 171, + 475, + 232 + ], + "spans": [ + { + "bbox": [ + 395, + 171, + 475, + 232 + ], + "type": "text", + "content": "GPT-4o: ...pattern consists of...a 2x2 grid... can infer...an additional four blue squares in the obscured section. 
Final count: 8 blue squares Ground truth: 6" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 481, + 110, + 547, + 168 + ], + "blocks": [ + { + "bbox": [ + 481, + 110, + 547, + 168 + ], + "lines": [ + { + "bbox": [ + 481, + 110, + 547, + 168 + ], + "spans": [ + { + "bbox": [ + 481, + 110, + 547, + 168 + ], + "type": "image", + "image_path": "9049c59347241b0b3b291a226e884daeba5d88430388678bdb27fa8f3cd22b6c.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 478, + 171, + 553, + 232 + ], + "lines": [ + { + "bbox": [ + 478, + 171, + 553, + 232 + ], + "spans": [ + { + "bbox": [ + 478, + 171, + 553, + 232 + ], + "type": "text", + "content": "GPT-4o: Pattern: Triangular arrangement ...Final Count:7 red dots (5 visible " + }, + { + "bbox": [ + 478, + 171, + 553, + 232 + ], + "type": "inline_equation", + "content": "^+" + }, + { + "bbox": [ + 478, + 171, + 553, + 232 + ], + "type": "text", + "content": " 2 estimated behind the black box). Ground truth:6" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 55, + 273, + 296, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 273, + 296, + 345 + ], + "spans": [ + { + "bbox": [ + 55, + 273, + 296, + 345 + ], + "type": "text", + "content": "dicted values, and " + }, + { + "bbox": [ + 55, + 273, + 296, + 345 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 55, + 273, + 296, + 345 + ], + "type": "text", + "content": " is the number of observations. sMAPE is capped at " + }, + { + "bbox": [ + 55, + 273, + 296, + 345 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 55, + 273, + 296, + 345 + ], + "type": "text", + "content": ", providing a fixed range. 
This makes sMAPE ideal for challenging tasks like ours, as we can penalize responses that fail to produce an answer with a maximum error of " + }, + { + "bbox": [ + 55, + 273, + 296, + 345 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 55, + 273, + 296, + 345 + ], + "type": "text", + "content": ". For a justification of sMAPE over other metrics, see Appendix A.1." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 55, + 353, + 115, + 364 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 353, + 115, + 364 + ], + "spans": [ + { + "bbox": [ + 55, + 353, + 115, + 364 + ], + "type": "text", + "content": "2.2. Dataset" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 54, + 370, + 295, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 370, + 295, + 550 + ], + "spans": [ + { + "bbox": [ + 54, + 370, + 295, + 550 + ], + "type": "text", + "content": "CAPTURE" + }, + { + "bbox": [ + 54, + 370, + 295, + 550 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 54, + 370, + 295, + 550 + ], + "type": "text", + "content": ". We introduce a set of real images with patterns to test amodal counting in naturalistic settings. The original images and annotations come from the FSC-147 dataset [37], a diverse counting dataset with manual annotations for the number of target objects and all object bounding boxes in each image. FSC-147 contains a diverse array of objects, with 6146 real-world images across 147 object categories. We filter FSC-147 for images that contain identifiable and regular patterns of objects and manually overlay a black box to occlude some objects, resulting in 924 images. Filtering is first performed with GPT-4o and then manually verified; we also manually verify that determining objects despite the occlusion is feasible. For each example, we maintain both occluded and unoccluded versions. 
Further details on CAPTURE" + }, + { + "bbox": [ + 54, + 370, + 295, + 550 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 54, + 370, + 295, + 550 + ], + "type": "text", + "content": " can be found in Appendix B." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 55, + 556, + 295, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 556, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 55, + 556, + 295, + 689 + ], + "type": "text", + "content": "CAPTUREsynthetic. While CAPTUREreal makes CAPTURE more applicable to real-world scenarios, each image is unique, making the data less controlled and challenging to draw clear conclusions about model performance. Images without background distractors, texture variance, and other potential visual obstacles provide a more controlled version of the task. Therefore, we create CAPTUREsynthetic to examine the task in a fully controlled environment. CAPTUREsynthetic comprises 1250 images of simple objects in patterns, where different variables are held constant or changed. We vary the following features:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 55, + 689, + 220, + 713 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 55, + 689, + 211, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 211, + 700 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 211, + 700 + ], + "type": "text", + "content": "1. Object count: varies from 5 to 15." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 55, + 701, + 220, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 701, + 220, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 701, + 220, + 713 + ], + "type": "text", + "content": "2. Object: can be either dots or squares." 
+ } + ] + } + ], + "index": 22 + } + ], + "sub_type": "text" + }, + { + "type": "table", + "bbox": [ + 317, + 270, + 553, + 358 + ], + "blocks": [ + { + "bbox": [ + 317, + 270, + 553, + 358 + ], + "lines": [ + { + "bbox": [ + 317, + 270, + 553, + 358 + ], + "spans": [ + { + "bbox": [ + 317, + 270, + 553, + 358 + ], + "type": "table", + "html": "
CAPTURErealCAPTUREsynthetic
# Images9241250
# Object Types922
Avg. Occluded Obj.13.972.73
Avg. Total Obj.61.4510.00
StrengthsDiverse Objects/SettingsConfounder-free
NaturalisticControllable Attributes
Realistic ContextUniformly Distributed
", + "image_path": "4dfd3d68fad466d166038fb0040b03ca482ef5b70451ed89cecd58cce73374cb.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "table_body" + } + ], + "index": 24 + }, + { + "bbox": [ + 335, + 366, + 533, + 378 + ], + "lines": [ + { + "bbox": [ + 335, + 366, + 533, + 378 + ], + "spans": [ + { + "bbox": [ + 335, + 366, + 533, + 378 + ], + "type": "text", + "content": "Table 1. Statistics and strengths for CAPTURE splits." + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 396, + 553, + 468 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 313, + 396, + 553, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 396, + 553, + 419 + ], + "spans": [ + { + "bbox": [ + 313, + 396, + 553, + 419 + ], + "type": "text", + "content": "3. Arrangement/shape: can be a rectangle, circle, or pyramid (where feasible based on object count)." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 313, + 420, + 553, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 420, + 553, + 443 + ], + "spans": [ + { + "bbox": [ + 313, + 420, + 553, + 443 + ], + "type": "text", + "content": "4. Location: we consider five positions on the page: center, top-left, top-right, bottom-left, or bottom-right." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 313, + 444, + 553, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 444, + 553, + 468 + ], + "spans": [ + { + "bbox": [ + 313, + 444, + 553, + 468 + ], + "type": "text", + "content": "5. Color: we randomly choose one of 5 colors for all the objects in an image." 
+ } + ] + } + ], + "index": 28 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 471, + 554, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 471, + 554, + 507 + ], + "spans": [ + { + "bbox": [ + 313, + 471, + 554, + 507 + ], + "type": "text", + "content": "The " + }, + { + "bbox": [ + 313, + 471, + 554, + 507 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{synthetic}}" + }, + { + "bbox": [ + 313, + 471, + 554, + 507 + ], + "type": "text", + "content": " data is split similarly to the " + }, + { + "bbox": [ + 313, + 471, + 554, + 507 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{real}}" + }, + { + "bbox": [ + 313, + 471, + 554, + 507 + ], + "type": "text", + "content": " data; each configuration has a variant with an overlaid occluding box and one without." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 313, + 525, + 448, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 525, + 448, + 538 + ], + "spans": [ + { + "bbox": [ + 313, + 525, + 448, + 538 + ], + "type": "text", + "content": "2.3. Statistics and Examples" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 312, + 545, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 545, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 312, + 545, + 555, + 713 + ], + "type": "text", + "content": "Fig. 2 shows examples from " + }, + { + "bbox": [ + 312, + 545, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{real}}" + }, + { + "bbox": [ + 312, + 545, + 555, + 713 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 312, + 545, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{synthetic}}" + }, + { + "bbox": [ + 312, + 545, + 555, + 713 + ], + "type": "text", + "content": " paired with their corresponding answers from GPT-4o and their ground truth answers. 
These examples show the range of objects and patterns in the dataset and highlight the task's feasibility for humans. Tab. 1 reports summary statistics for " + }, + { + "bbox": [ + 312, + 545, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}" + }, + { + "bbox": [ + 312, + 545, + 555, + 713 + ], + "type": "text", + "content": ", including the number of images and object types, as well as the mean number of occluded and total objects in both splits of " + }, + { + "bbox": [ + 312, + 545, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}" + }, + { + "bbox": [ + 312, + 545, + 555, + 713 + ], + "type": "text", + "content": ". The number of objects in " + }, + { + "bbox": [ + 312, + 545, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{real}}" + }, + { + "bbox": [ + 312, + 545, + 555, + 713 + ], + "type": "text", + "content": " is shown in Fig. 3, where most images have between 0 and 30 objects. On " + }, + { + "bbox": [ + 312, + 545, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{synthetic}}" + }, + { + "bbox": [ + 312, + 545, + 555, + 713 + ], + "type": "text", + "content": ", the maximum number of objects is 15, and " + }, + { + "bbox": [ + 312, + 545, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{synthetic}}" + }, + { + "bbox": [ + 312, + 545, + 555, + 713 + ], + "type": "text", + "content": " images generally have 1-6 occluded objects (shown in Fig. 4, as further occlusion could make the count unresolvable)." 
+ } + ] + } + ], + "index": 32 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 82, + 76, + 266, + 178 + ], + "blocks": [ + { + "bbox": [ + 82, + 76, + 266, + 178 + ], + "lines": [ + { + "bbox": [ + 82, + 76, + 266, + 178 + ], + "spans": [ + { + "bbox": [ + 82, + 76, + 266, + 178 + ], + "type": "image", + "image_path": "1bbc7a1297efec4901bf89c6b7bcce563982d0c367188c0e42f7ec57805c29ee.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 88, + 186, + 261, + 198 + ], + "lines": [ + { + "bbox": [ + 88, + 186, + 261, + 198 + ], + "spans": [ + { + "bbox": [ + 88, + 186, + 261, + 198 + ], + "type": "text", + "content": "Figure 3. # of objects in CAPTURE" + }, + { + "bbox": [ + 88, + 186, + 261, + 198 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 88, + 186, + 261, + 198 + ], + "type": "text", + "content": " images." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 83, + 208, + 266, + 297 + ], + "blocks": [ + { + "bbox": [ + 83, + 208, + 266, + 297 + ], + "lines": [ + { + "bbox": [ + 83, + 208, + 266, + 297 + ], + "spans": [ + { + "bbox": [ + 83, + 208, + 266, + 297 + ], + "type": "image", + "image_path": "b5bdfdd2908acbf8cdef268b0abf95ce0fe53e686332d8e190da4bf0e24212c7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 64, + 308, + 285, + 320 + ], + "lines": [ + { + "bbox": [ + 64, + 308, + 285, + 320 + ], + "spans": [ + { + "bbox": [ + 64, + 308, + 285, + 320 + ], + "type": "text", + "content": "Figure 4. # of occluded objects in CAPTUREsynthetic images." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 331, + 163, + 344 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 331, + 163, + 344 + ], + "spans": [ + { + "bbox": [ + 55, + 331, + 163, + 344 + ], + "type": "text", + "content": "3. Experiment Setup" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 350, + 112, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 350, + 112, + 361 + ], + "spans": [ + { + "bbox": [ + 55, + 350, + 112, + 361 + ], + "type": "text", + "content": "3.1. Models" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 368, + 295, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 368, + 295, + 608 + ], + "spans": [ + { + "bbox": [ + 55, + 368, + 295, + 608 + ], + "type": "text", + "content": "We experiment with GPT-4o [28], Intern-VL2-Llama3-8B [9, 10], Qwen2-VL-7B [41], MiniCPM-o 2.6 [47], and Kimi-VL-A3B [40] for their high scores on other VLM tasks [29]. We add Molmo 7B-D [13], because of its ability to \"point and count,\" giving it a potential advantage on CAPTURE. 
Specifically, Molmo is trained on millions of examples that directly ground text to 2D coordinates (or \"points\") in images. This allows Molmo to directly point to image coordinates and count more easily by pointing to several objects. All the VLMs feature a different language backbone and vision encoder to provide broad coverage of model architectures. To evaluate models, we provide the model with the name of the specific object to be counted and the explicit instruction to count fully visible objects and objects behind the occluding box (in the occluded images). For each model, we test ten prompts on a validation set of 100 images, selecting the best prompt for each model in each dataset section (CAPTURE" + }, + { + "bbox": [ + 55, + 368, + 295, + 608 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 55, + 368, + 295, + 608 + ], + "type": "text", + "content": "/CAPTURE" + }, + { + "bbox": [ + 55, + 368, + 295, + 608 + ], + "type": "inline_equation", + "content": "^{\\text{synthetic}}" + }, + { + "bbox": [ + 55, + 368, + 295, + 608 + ], + "type": "text", + "content": ") and for each environment (occluded/unoccluded). We provide the selected prompts in Appendix D." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 616, + 242, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 616, + 242, + 628 + ], + "spans": [ + { + "bbox": [ + 55, + 616, + 242, + 628 + ], + "type": "text", + "content": "3.2. Answer Generation and Extraction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 634, + 294, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 634, + 294, + 682 + ], + "spans": [ + { + "bbox": [ + 55, + 634, + 294, + 682 + ], + "type": "text", + "content": "Given the complex nature of CAPTURE, we allow models to generate open-ended responses and then subsequently extract answers. 
Further details (including the maximum number of tokens) can be found in Appendix A.2." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 689, + 295, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 295, + 715 + ], + "type": "text", + "content": "Answer extraction. Empirically, we found that constraining the output to a specific format for ease of analysis neg-" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 72, + 553, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 239 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 239 + ], + "type": "text", + "content": "atively impacted benchmark performance. Therefore, we instead prompt models to generate freely and extract the final output number using a separate answer extractor based on Llama 3.1 8B [1]. This answer extractor takes the output from the model as input and prompts it to extract a single number representing the final answer. The answer extractor also identifies if an output failed to converge on a singular number answer and assigns a label to these examples. We mark such incomplete/incoherent model generations as 'skipped' questions and when calculating the error later, these responses are assigned the worst possible sMAPE score (100%). The answer extractor outputs were manually verified on 1000 outputs, and the extractor was found to be 100% accurate." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 247, + 553, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 247, + 553, + 344 + ], + "spans": [ + { + "bbox": [ + 313, + 247, + 553, + 344 + ], + "type": "text", + "content": "Human and object detection baselines. We also report the performance of humans and a recent counting model (COUNTGD [3]) as baselines to establish a point of reference for model performance. 
To confirm that humans can perform the CAPTURE task, we provided 100 randomly selected occluded examples each from the CAPTURE" + }, + { + "bbox": [ + 313, + 247, + 553, + 344 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 313, + 247, + 553, + 344 + ], + "type": "text", + "content": " and CAPTURE" + }, + { + "bbox": [ + 313, + 247, + 553, + 344 + ], + "type": "inline_equation", + "content": "^{\\text{synthetic}}" + }, + { + "bbox": [ + 313, + 247, + 553, + 344 + ], + "type": "text", + "content": " subsets to 3 undergraduate students with no prior knowledge of the task." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 354, + 436, + 368 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 354, + 436, + 368 + ], + "spans": [ + { + "bbox": [ + 313, + 354, + 436, + 368 + ], + "type": "text", + "content": "4. Results and Analysis" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 374, + 474, + 386 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 374, + 474, + 386 + ], + "spans": [ + { + "bbox": [ + 313, + 374, + 474, + 386 + ], + "type": "text", + "content": "4.1. Main Results on CAPTUREreal" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "spans": [ + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "text", + "content": "Models consistently struggle with counting and perform worse on occluded images. We run the VLMs on the occluded and unoccluded versions of CAPTURE to discern whether occlusion significantly impacts model performance. Tab. 2 shows that all models struggle with counting generally, performing poorly on both splits. Moreover, we see that every model performs better on the unoccluded images. 
On average, the models perform " + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "inline_equation", + "content": "6.28\\%" + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "text", + "content": " worse in " + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{real}}" + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "text", + "content": " occluded images and " + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "inline_equation", + "content": "4.85\\%" + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "text", + "content": " worse in " + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{synthetic}}" + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "text", + "content": " occluded images (in terms of absolute sMAPE), indicating increased difficulty from a standard counting task. The best model for both splits, GPT-4o, has an error rate of " + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "inline_equation", + "content": "14.75\\%" + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{real}}" + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "text", + "content": " and a lower error rate of " + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "inline_equation", + "content": "9.71\\%" + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{synthetic}}" + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "text", + "content": ". 
Across both the real and synthetic split, GPT-4o's error increases with occlusion, by " + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "inline_equation", + "content": "1.41\\%" + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "text", + "content": " on the real data and " + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "inline_equation", + "content": "3.81\\%" + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "text", + "content": " on the synthetic split. Interestingly, despite its fine-tuning on counting tasks, Molmo exhibits a sizable error rate of " + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "inline_equation", + "content": "32.5\\%" + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{real}}" + }, + { + "bbox": [ + 313, + 395, + 553, + 670 + ], + "type": "text", + "content": " occluded images. The high error rates of VLMs indicate limited capabilities in visual understanding under occlusions, pattern recognition, and counting. We further analyze the source of these errors with oracle experiments in Sec. 4.3." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 677, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 677, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 553, + 713 + ], + "type": "text", + "content": "Humans complete the task with almost no error. Tab. 
3, evaluated on a 100-example subset of each split, confirms that humans complete the task with ease despite occlusion," + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 156, + 70, + 452, + 201 + ], + "blocks": [ + { + "bbox": [ + 156, + 70, + 452, + 201 + ], + "lines": [ + { + "bbox": [ + 156, + 70, + 452, + 201 + ], + "spans": [ + { + "bbox": [ + 156, + 70, + 452, + 201 + ], + "type": "table", + "html": "
ModelError (%) [↓]
CAPTURErealCAPTUREsynthetic
Originalw/ Occlusion (Δ)Originalw/ Occlusion (Δ)
GPT-4o13.3414.75 (+1.41)5.909.71 (+3.81)
InternVL226.1732.90 (+6.73)16.4417.57 (+1.13)
Molmo25.9032.49 (+6.59)8.4017.73 (+9.33)
Qwen2VL18.9629.33 (+10.37)6.6311.74 (+5.11)
MiniCPM-o 2.623.8430.08 (+6.24)17.0619.00 (+1.94)
Kimi-VL-A3B23.4825.96 (+2.48)16.9118.07 (+1.16)
Avg. of 6 VLMs21.9527.59 (+5.64)11.8915.64 (+3.75)
", + "image_path": "4518a5465c290528c213f388056a3af4202975be799d21131dcfbeb4f70ff0f6.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 67, + 239, + 282, + 373 + ], + "blocks": [ + { + "bbox": [ + 72, + 209, + 536, + 220 + ], + "lines": [ + { + "bbox": [ + 72, + 209, + 536, + 220 + ], + "spans": [ + { + "bbox": [ + 72, + 209, + 536, + 220 + ], + "type": "text", + "content": "Table 2. Results across VLMs on all splits of CAPTURE, with average error for each column. Metric: sMAPE (lower is better)." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 67, + 239, + 282, + 373 + ], + "lines": [ + { + "bbox": [ + 67, + 239, + 282, + 373 + ], + "spans": [ + { + "bbox": [ + 67, + 239, + 282, + 373 + ], + "type": "table", + "html": "
ModelError (%) [↓]
CAPTURErealCAPTUREsynthetic
(Baseline)
Human3.790.92
(VLMs)
GPT-4o14.759.71
InternVL232.9017.57
Molmo32.4917.73
Qwen2VL29.3311.74
Avg. of 4 VLMs27.3714.19
", + "image_path": "b2289750d0a6af17eb500803ad640c7b0e5d851d29c011084ed404545dd9bf6b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 380, + 294, + 403 + ], + "lines": [ + { + "bbox": [ + 55, + 380, + 294, + 403 + ], + "spans": [ + { + "bbox": [ + 55, + 380, + 294, + 403 + ], + "type": "text", + "content": "Table 3. Human baseline vs VLMs on CAPTURE" + }, + { + "bbox": [ + 55, + 380, + 294, + 403 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 55, + 380, + 294, + 403 + ], + "type": "text", + "content": " and CAPTURE" + }, + { + "bbox": [ + 55, + 380, + 294, + 403 + ], + "type": "inline_equation", + "content": "^{\\text{synthetic}}" + }, + { + "bbox": [ + 55, + 380, + 294, + 403 + ], + "type": "text", + "content": " (occluded split). Metric: sMAPE (lower is better)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 416, + 295, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 416, + 295, + 477 + ], + "spans": [ + { + "bbox": [ + 55, + 416, + 295, + 477 + ], + "type": "text", + "content": "with an sMAPE of " + }, + { + "bbox": [ + 55, + 416, + 295, + 477 + ], + "type": "inline_equation", + "content": "3.79\\%" + }, + { + "bbox": [ + 55, + 416, + 295, + 477 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 55, + 416, + 295, + 477 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{real}}" + }, + { + "bbox": [ + 55, + 416, + 295, + 477 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 416, + 295, + 477 + ], + "type": "inline_equation", + "content": "0.92\\%" + }, + { + "bbox": [ + 55, + 416, + 295, + 477 + ], + "type": "text", + "content": " on " + }, + { + "bbox": [ + 55, + 416, + 295, + 477 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{synthetic}}" + }, + { + "bbox": [ + 55, + 416, + 295, + 
477 + ], + "type": "text", + "content": ". On the same subset of examples, models performed 7 times worse on " + }, + { + "bbox": [ + 55, + 416, + 295, + 477 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{real}}" + }, + { + "bbox": [ + 55, + 416, + 295, + 477 + ], + "type": "text", + "content": " and 14 times worse on " + }, + { + "bbox": [ + 55, + 416, + 295, + 477 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{synthetic}}" + }, + { + "bbox": [ + 55, + 416, + 295, + 477 + ], + "type": "text", + "content": " than humans, underscoring the gap between VLMs and humans in this task." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 485, + 295, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 485, + 295, + 616 + ], + "spans": [ + { + "bbox": [ + 54, + 485, + 295, + 616 + ], + "type": "text", + "content": "Object detection-based baseline outperforms VLMs. We attempt the task with a strong object detection-based model to highlight that a standard counting approach will experience a greater loss going from unoccluded to occluded environments, as it cannot capture any occluded objects, i.e. cannot reason. We choose COUNTGD [3], the top solution for unoccluded counting on FSC-147, on which it was trained. Because we draw our images from FSC-147's train and test sets, and COUNTGD trains on FSC-147, we only evaluate COUNTGD on the subset of our data sourced from the FSC-147 test set, consisting of 149 images." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 617, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 617, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 617, + 295, + 714 + ], + "type": "text", + "content": "We find that COUNTGD deteriorates by " + }, + { + "bbox": [ + 55, + 617, + 295, + 714 + ], + "type": "inline_equation", + "content": "7.19\\%" + }, + { + "bbox": [ + 55, + 617, + 295, + 714 + ], + "type": "text", + "content": " on occluded images, increasing from " + }, + { + "bbox": [ + 55, + 617, + 295, + 714 + ], + "type": "inline_equation", + "content": "3.15\\%" + }, + { + "bbox": [ + 55, + 617, + 295, + 714 + ], + "type": "text", + "content": " sMAPE to " + }, + { + "bbox": [ + 55, + 617, + 295, + 714 + ], + "type": "inline_equation", + "content": "10.34\\%" + }, + { + "bbox": [ + 55, + 617, + 295, + 714 + ], + "type": "text", + "content": " as observed in Fig. 5. As expected, COUNTGD outperforms all VLMs on the unoccluded split as it is trained for counting on FSC-147. COUNTGD also outperforms the VLMs on the occluded split, reinforcing that only counting the visible objects is a hard-to-beat baseline. However, the drop in performance with occlusion is greater than the average" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 317, + 242, + 550, + 315 + ], + "blocks": [ + { + "bbox": [ + 317, + 242, + 550, + 315 + ], + "lines": [ + { + "bbox": [ + 317, + 242, + 550, + 315 + ], + "spans": [ + { + "bbox": [ + 317, + 242, + 550, + 315 + ], + "type": "image", + "image_path": "6852ecb13e232280415a379065d7456c9457cca7da26b7d7923a10788f9ddee8.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 325, + 555, + 358 + ], + "lines": [ + { + "bbox": [ + 313, + 325, + 555, + 358 + ], + "spans": [ + { + "bbox": [ + 313, + 325, + 555, + 358 + ], + "type": "text", + "content": "Figure 5. VLM vs. 
VLM + CountGD hybrid on questions from the CAPTURE" + }, + { + "bbox": [ + 313, + 325, + 555, + 358 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 313, + 325, + 555, + 358 + ], + "type": "text", + "content": " (occluded split) that are not in COUNTGD training set. Metric: sMAPE (lower is better)." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 374, + 554, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 374, + 554, + 435 + ], + "spans": [ + { + "bbox": [ + 313, + 374, + 554, + 435 + ], + "type": "text", + "content": "VLM's drop, highlighting a disadvantage of non-reasoning solutions on CAPTURE: their error is necessarily tied directly to the number of occluded objects and they cannot address the task on their own, whereas a VLM might be able to infer missing objects via reasoning." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 441, + 555, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 441, + 555, + 562 + ], + "spans": [ + { + "bbox": [ + 313, + 441, + 555, + 562 + ], + "type": "text", + "content": "Hybrid VLM counting systems improve performance. Finding that COUNTGD is far better at counting visible objects than VLMs, we leverage the advantage that COUNTGD has by feeding its visible object count information to the VLMs as part of the prompt. As expected, Fig. 5 illustrates that there is a considerable decrease in error when CountGD and the VLMs are combined. However, this hybrid system still performs worse than COUNTGD alone, indicating VLMs are still subpar even at counting just occluded objects (as further reinforced by Appendix C.3)." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 569, + 545, + 580 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 569, + 545, + 580 + ], + "spans": [ + { + "bbox": [ + 313, + 569, + 545, + 580 + ], + "type": "text", + "content": "4.2. Effect of Data Factors on VLM Performance" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 586, + 554, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 586, + 554, + 682 + ], + "spans": [ + { + "bbox": [ + 313, + 586, + 554, + 682 + ], + "type": "text", + "content": "Here, we use the CAPTURE" + }, + { + "bbox": [ + 313, + 586, + 554, + 682 + ], + "type": "inline_equation", + "content": "^{\\text{synthetic}}" + }, + { + "bbox": [ + 313, + 586, + 554, + 682 + ], + "type": "text", + "content": " data (which can be controlled precisely and fully annotated) to examine which features correlate with model performance. We test the effect of the following variables on final performance: (1) Increasing the number of occluded objects; (2) Varying the pattern. We also investigate whether models can classify patterns, and to what degree models can predict the number of occluded objects only (rather than the total)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 689, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 554, + 713 + ], + "type": "text", + "content": "Models perform worse when more dots are occluded. In Fig. 
6 (right), we observe that error increases with re" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 308, + 741 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 72, + 312, + 182 + ], + "blocks": [ + { + "bbox": [ + 109, + 72, + 312, + 182 + ], + "lines": [ + { + "bbox": [ + 109, + 72, + 312, + 182 + ], + "spans": [ + { + "bbox": [ + 109, + 72, + 312, + 182 + ], + "type": "image", + "image_path": "d1ff88f8e06685dc0b485224cacfa612ed34e24c8db13ff474eb31b034de7999.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 190, + 555, + 213 + ], + "lines": [ + { + "bbox": [ + 55, + 190, + 555, + 213 + ], + "spans": [ + { + "bbox": [ + 55, + 190, + 555, + 213 + ], + "type": "text", + "content": "Figure 6. Effect of number of total objects in the image and number of occluded objects on sMAPE from " + }, + { + "bbox": [ + 55, + 190, + 555, + 213 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{synthetic}}" + }, + { + "bbox": [ + 55, + 190, + 555, + 213 + ], + "type": "text", + "content": " (occluded split). Metric: sMAPE (lower is better)." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 314, + 72, + 498, + 182 + ], + "blocks": [ + { + "bbox": [ + 314, + 72, + 498, + 182 + ], + "lines": [ + { + "bbox": [ + 314, + 72, + 498, + 182 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 498, + 182 + ], + "type": "image", + "image_path": "0aacafaba92370dae1f0e5b5f4e96b3961bd610b4f253bcf291566faf27d3a59.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 69, + 230, + 279, + 355 + ], + "blocks": [ + { + "bbox": [ + 69, + 230, + 279, + 355 + ], + "lines": [ + { + "bbox": [ + 69, + 230, + 279, + 355 + ], + "spans": [ + { + "bbox": [ + 69, + 230, + 279, + 355 + ], + "type": "image", + "image_path": "6dcb61ccb0400b9be87716c1ec763a1aee18b1ea03c0a515f3207f2846c30ff6.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 361, + 295, + 384 + ], + "lines": [ + { + "bbox": [ + 55, + 361, + 295, + 384 + ], + "spans": [ + { + "bbox": [ + 55, + 361, + 295, + 384 + ], + "type": "text", + "content": "Figure 7. Effect of pattern type in CAPTURE" + }, + { + "bbox": [ + 55, + 361, + 295, + 384 + ], + "type": "inline_equation", + "content": "^{\\text{synthetic}}" + }, + { + "bbox": [ + 55, + 361, + 295, + 384 + ], + "type": "text", + "content": " (occluded split) on sMAPE. Metric: sMAPE (lower is better)." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 398, + 296, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 398, + 296, + 495 + ], + "spans": [ + { + "bbox": [ + 55, + 398, + 296, + 495 + ], + "type": "text", + "content": "spect to the number of occluded dots. However, Fig. 6 (left) also shows that performance is less affected by the total number of dots. 
This suggests that the task difficulty is more closely correlated with the difficulty of occlusion – i.e. the difficulty of the world modeling task – rather than the complexity of the pattern. Some models, such as GPT-4o, deviate from this trend, which has lower error on specific numbers. We further explore model bias in Appendix C.5." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 502, + 296, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 502, + 296, + 574 + ], + "spans": [ + { + "bbox": [ + 55, + 502, + 296, + 574 + ], + "type": "text", + "content": "Performance depends on pattern type. The controllability of " + }, + { + "bbox": [ + 55, + 502, + 296, + 574 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{synthetic}}" + }, + { + "bbox": [ + 55, + 502, + 296, + 574 + ], + "type": "text", + "content": " allows us to measure the effect of pattern type on performance. In Fig. 7, we find that model performance differs across shapes with some regularity: objects arranged in a circle generally have lower sMAPE than other shapes, across all models. Qwen2VL has an espe" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 80, + 586, + 269, + 684 + ], + "blocks": [ + { + "bbox": [ + 80, + 586, + 269, + 684 + ], + "lines": [ + { + "bbox": [ + 80, + 586, + 269, + 684 + ], + "spans": [ + { + "bbox": [ + 80, + 586, + 269, + 684 + ], + "type": "table", + "html": "
ModelAccuracy (%) [↑]
Originalw/ Occlusion (Δ)
GPT-4o84.0078.52 (-5.48)
InternVL268.5247.48 (-21.04)
Molmo80.7065.22 (-15.48)
Qwen2VL88.3586.43 (-1.92)
Avg. of 4 VLMs80.3969.41 (-10.98)
", + "image_path": "ce5800214bd41b55746d8d1c7787aaa4f4fc6e766570fdd0f051c236d58c6c8e.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 693, + 295, + 715 + ], + "lines": [ + { + "bbox": [ + 55, + 693, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 693, + 295, + 715 + ], + "type": "text", + "content": "Table 4. VLM accuracy in identifying the correct pattern in CAPTUREsynthetic. Metric: accuracy (higher is better)." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 229, + 553, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 229, + 553, + 254 + ], + "spans": [ + { + "bbox": [ + 313, + 229, + 553, + 254 + ], + "type": "text", + "content": "cially large decrease in error when given circular arrangements compared to rectangles or triangles." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 261, + 555, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 261, + 555, + 440 + ], + "spans": [ + { + "bbox": [ + 313, + 261, + 555, + 440 + ], + "type": "text", + "content": "Models can identify patterns. To determine how much model errors can be attributed to a lack of pattern recognition ability, we formulate a separate task where models must recognize the pattern in the image on CAPTURE" + }, + { + "bbox": [ + 313, + 261, + 555, + 440 + ], + "type": "inline_equation", + "content": "^{\\text{synthetic}}" + }, + { + "bbox": [ + 313, + 261, + 555, + 440 + ], + "type": "text", + "content": ". Here, we frame the task as multiple-choice, asking the model to select from the pattern types available (rectangle, triangle, or circle). 
Table 4 illustrates that all perform substantially better than random at this task, with most models except InternVL2 achieving accuracy above " + }, + { + "bbox": [ + 313, + 261, + 555, + 440 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 313, + 261, + 555, + 440 + ], + "type": "text", + "content": " in the unoccluded setting. As expected, the patterns were easier to identify in unoccluded scenarios, with models suffering an average accuracy drop of " + }, + { + "bbox": [ + 313, + 261, + 555, + 440 + ], + "type": "inline_equation", + "content": "10.95\\%" + }, + { + "bbox": [ + 313, + 261, + 555, + 440 + ], + "type": "text", + "content": " in the occluded setting. Notably, GPT-4o and Qwen2VL have a fairly small drop in performance, suggesting they can generally capture the pattern even in the presence of occlusion." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 449, + 506, + 461 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 449, + 506, + 461 + ], + "spans": [ + { + "bbox": [ + 313, + 449, + 506, + 461 + ], + "type": "text", + "content": "4.3. Analysis with Auxiliary Information" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 466, + 555, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 466, + 555, + 658 + ], + "spans": [ + { + "bbox": [ + 313, + 466, + 555, + 658 + ], + "type": "text", + "content": "In Sec. 4.1, we see that models broadly struggle with amodal counting. Here, we seek to disentangle whether this problem results from a failure to reason, the absence of a world model, or both by giving VLMs two different types of auxiliary information: oracle information and predicted information. Oracle information is ground truth and is directly pulled from CAPTURE's metadata, e.g., object locations. Predicted information generates new information from a completely separate model and gives it to the VLM. 
This information is not ground truth and is sourced from an external model, such as an image inpainting model, rather than the VLM. By giving the model auxiliary information in the form of reasoning and spatial clues, we can establish how much of each model's error results from an inability to handle occlusion rather than an inability to recognize and count visible objects." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 665, + 556, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 665, + 556, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 665, + 556, + 713 + ], + "type": "text", + "content": "Oracle setup. We test two oracles for " + }, + { + "bbox": [ + 313, + 665, + 556, + 713 + ], + "type": "inline_equation", + "content": "\\mathsf{CAPTURE}^{\\mathsf{real}}" + }, + { + "bbox": [ + 313, + 665, + 556, + 713 + ], + "type": "text", + "content": "'s occluded split based on its constituent subtasks: counting the visible objects and inferring/counting occluded objects. 
Both oracles provide the VLM with text-based coordinates" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 83, + 181, + 156 + ], + "blocks": [ + { + "bbox": [ + 93, + 74, + 148, + 82 + ], + "lines": [ + { + "bbox": [ + 93, + 74, + 148, + 82 + ], + "spans": [ + { + "bbox": [ + 93, + 74, + 148, + 82 + ], + "type": "text", + "content": "With Occlusion" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 60, + 83, + 181, + 156 + ], + "lines": [ + { + "bbox": [ + 60, + 83, + 181, + 156 + ], + "spans": [ + { + "bbox": [ + 60, + 83, + 181, + 156 + ], + "type": "image", + "image_path": "c9dcd23e293902294ad0bb304302126a7e2ee754cdd92911fd7e5aabcfc11d68.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 61, + 158, + 178, + 209 + ], + "lines": [ + { + "bbox": [ + 61, + 158, + 178, + 209 + ], + "spans": [ + { + "bbox": [ + 61, + 158, + 178, + 209 + ], + "type": "text", + "content": "With Occlusion Prompt: Count the exact number of cans in the image. Assume the pattern of cans continues behind any black box. Provide the total number of cans as if the black box were not there." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 241, + 555, + 285 + ], + "lines": [ + { + "bbox": [ + 55, + 241, + 555, + 285 + ], + "spans": [ + { + "bbox": [ + 55, + 241, + 555, + 285 + ], + "type": "text", + "content": "Figure 8. Example image and text inputs for experiments with auxiliary information experiments (Sec. 4.3). 
Blue eyes indicate objects for which the All Object Coordinate Oracle or Visible Object Coordinate Oracle extracts coordinates. The brighter part of the image represents the area which Inpainting Pipeline fills in. Example prompts are shown in italics. Blue eye overlays and faded parts of images are for demonstration purposes and are not passed with the image." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 185, + 83, + 304, + 156 + ], + "blocks": [ + { + "bbox": [ + 194, + 74, + 294, + 83 + ], + "lines": [ + { + "bbox": [ + 194, + 74, + 294, + 83 + ], + "spans": [ + { + "bbox": [ + 194, + 74, + 294, + 83 + ], + "type": "text", + "content": "All Object Coordinate Oracle" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 185, + 83, + 304, + 156 + ], + "lines": [ + { + "bbox": [ + 185, + 83, + 304, + 156 + ], + "spans": [ + { + "bbox": [ + 185, + 83, + 304, + 156 + ], + "type": "image", + "image_path": "fdce1f292eb8c63e5d8886496c488350aad21c286ba6461c0abc5dfe7d1ccd31.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 185, + 158, + 304, + 236 + ], + "lines": [ + { + "bbox": [ + 185, + 158, + 304, + 236 + ], + "spans": [ + { + "bbox": [ + 185, + 158, + 304, + 236 + ], + "type": "text", + "content": "(w/Oracle information) \nAll Object Coordinate Oracle Prompt: Count the exact number of cans in the image, including behind the black box... 
Coordinates of all cans:59,43219,38356,43 522,3663,18073,335214 186),379,184),524,177220 332372,329525,325" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 306, + 83, + 428, + 156 + ], + "blocks": [ + { + "bbox": [ + 310, + 74, + 424, + 83 + ], + "lines": [ + { + "bbox": [ + 310, + 74, + 424, + 83 + ], + "spans": [ + { + "bbox": [ + 310, + 74, + 424, + 83 + ], + "type": "text", + "content": "Visible Object Coordinate Oracle" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 306, + 83, + 428, + 156 + ], + "lines": [ + { + "bbox": [ + 306, + 83, + 428, + 156 + ], + "spans": [ + { + "bbox": [ + 306, + 83, + 428, + 156 + ], + "type": "image", + "image_path": "d862b1fc3435e11a6f995250f084863846b2a1867e3e95f87cda3d508c3650df.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 158, + 418, + 228 + ], + "lines": [ + { + "bbox": [ + 310, + 158, + 418, + 228 + ], + "spans": [ + { + "bbox": [ + 310, + 158, + 418, + 228 + ], + "type": "text", + "content": "(w/Oracle information) \nVisible Object Coordinate \nOracle Prompt: Count the exact number of cans in the image, including behind the black box... 
\nCoordinates of visible cans: (59, 43), (219, 38), (356, 43), (522, 36), (63, 180), (73, 335)" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 430, + 83, + 551, + 156 + ], + "blocks": [ + { + "bbox": [ + 457, + 75, + 523, + 83 + ], + "lines": [ + { + "bbox": [ + 457, + 75, + 523, + 83 + ], + "spans": [ + { + "bbox": [ + 457, + 75, + 523, + 83 + ], + "type": "text", + "content": "Inpainting Pipeline" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 430, + 83, + 551, + 156 + ], + "lines": [ + { + "bbox": [ + 430, + 83, + 551, + 156 + ], + "spans": [ + { + "bbox": [ + 430, + 83, + 551, + 156 + ], + "type": "image", + "image_path": "8578bf7e10fea3496bee51ad25b496e2829784d6fda6ca668364777004884e8c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 436, + 158, + 541, + 192 + ], + "lines": [ + { + "bbox": [ + 436, + 158, + 541, + 192 + ], + "spans": [ + { + "bbox": [ + 436, + 158, + 541, + 192 + ], + "type": "text", + "content": "(w/ Predicted information) \nInpainting Pipeline Prompt: \nCount the exact number of cans in the image." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 435, + 201, + 544, + 228 + ], + "lines": [ + { + "bbox": [ + 435, + 201, + 544, + 228 + ], + "spans": [ + { + "bbox": [ + 435, + 201, + 544, + 228 + ], + "type": "text", + "content": "(Fading added only for emphasis to visualize infilling. Final image given to VLM is not faded)" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 99, + 297, + 511, + 381 + ], + "blocks": [ + { + "bbox": [ + 99, + 297, + 511, + 381 + ], + "lines": [ + { + "bbox": [ + 99, + 297, + 511, + 381 + ], + "spans": [ + { + "bbox": [ + 99, + 297, + 511, + 381 + ], + "type": "table", + "html": "
ModelOriginalw/ OcclusionOracle InformationPredicted Information
+ All Coordinates (Δ)+ Visible (Δ)+ Inpainting (Δ)
GPT-4o13.3414.752.93 (-11.82)9.20 (-5.55)15.89 (+1.14)
InternVL226.1732.9017.48 (-15.42)25.13 (-7.77)31.12 (-1.78)
Qwen2VL18.9629.339.62 (-19.71)17.70 (-11.63)22.64 (-6.69)
Avg. of 3 VLMs19.4925.6610.01 (-15.65)17.34 (-8.32)23.22 (-2.44)
", + "image_path": "766d3a6289f8b6fdb33abb8032609a92274b1bc67ea432f698f4bdcbf6a1cf2d.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 61, + 390, + 546, + 401 + ], + "lines": [ + { + "bbox": [ + 61, + 390, + 546, + 401 + ], + "spans": [ + { + "bbox": [ + 61, + 390, + 546, + 401 + ], + "type": "text", + "content": "Table 5. Effect of auxiliary information on occluded CAPTURE " + }, + { + "bbox": [ + 61, + 390, + 546, + 401 + ], + "type": "inline_equation", + "content": "{}^{\\text{real. }}\\Delta =" + }, + { + "bbox": [ + 61, + 390, + 546, + 401 + ], + "type": "text", + "content": " (Auxiliary Information) - (w/ Occlusion). Metric: sMAPE." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 54, + 422, + 297, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 422, + 297, + 626 + ], + "spans": [ + { + "bbox": [ + 54, + 422, + 297, + 626 + ], + "type": "text", + "content": "of objects in the image, simplifying the visual task by assuming the VLM effectively has a perfect visual system that can recognize and localize objects in the image. The first oracle, the Visible Object Coordinate Oracle, gives the VLM the coordinates of all unoccluded objects (encoded as text, as seen in Fig. 8) and instructs the model to estimate the number of occluded objects, count the number of visible object coordinates, and add the two. In other words, the model is given oracle information about what objects are visible, thus also revealing key information about the pattern. The second oracle, the All Object Coordinate Oracle, instead gives the model the coordinates of all objects. Here, the model only needs to count the coordinates in the prompt, eliminating the need to reason on the visual input. Note that Molmo is excluded in these tests because it contains a prompt limit that would truncate the list of coordinates. 
An example of the oracle inputs can be seen in Fig. 8." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 55, + 641, + 296, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 641, + 296, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 641, + 296, + 714 + ], + "type": "text", + "content": "Prediction setup. In this setting, we provide the VLM with an external world model representation predicted by another model. Specifically, we develop the Inpainting Pipeline to fill in the occluded region via a diffusion-based inpainting model and pass the inpainted image to the VLMs. For the inpainting model, we choose FLUX.1-Fill [dev]," + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 422, + 555, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 422, + 555, + 460 + ], + "spans": [ + { + "bbox": [ + 313, + 422, + 555, + 460 + ], + "type": "text", + "content": "whose backbone FLUX.1 [dev] [21] is a top public model in the Text to Image Model Arena [7]. An example input to the VLM can be seen on the far-right of Fig. 8." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 474, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 474, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 474, + 556, + 715 + ], + "type": "text", + "content": "Providing visible or all object coordinates improves performance substantially. The results in Tab. 2 indicate that models struggle on CAPTURE, which requires identifying a pattern and counting both visible and occluded objects. Moreover, models generally struggle with counting even in unoccluded settings. Both oracles simplify the counting task: All Object Coordinate Oracle reduces the task to simply counting coordinates with no reasoning involved, and Visible Object Coordinate Oracle similarly simplifies the task for visible objects, while still requiring inferring occluded objects. 
Additionally, under Visible Object Coordinate Oracle, recognizing the pattern shifts from a visual reasoning task to an augmented math problem. Instead of visually reasoning about where objects are located, the VLM considers what patterns the coordinates could make. Translating this task into a text problem results in an average increase of " + }, + { + "bbox": [ + 313, + 474, + 556, + 715 + ], + "type": "inline_equation", + "content": "15\\%" + }, + { + "bbox": [ + 313, + 474, + 556, + 715 + ], + "type": "text", + "content": " with all objects coordinate oracle; the errors LLMs make here are due to an inability to count in the text prompt, as opposed to weaknesses in handling occlusion (since all object coordinates are given), and the strongest" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 741 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 57, + 72, + 294, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 72, + 294, + 167 + ], + "spans": [ + { + "bbox": [ + 57, + 72, + 294, + 167 + ], + "type": "text", + "content": "model, GPT-4o, achieves minimal error here. We also obtain an average increase of " + }, + { + "bbox": [ + 57, + 72, + 294, + 167 + ], + "type": "inline_equation", + "content": "8\\%" + }, + { + "bbox": [ + 57, + 72, + 294, + 167 + ], + "type": "text", + "content": " with the visible objects coordinate oracle (shown in Tab. 5), possibly because it allows the more powerful LLM backbone (which is far larger than the vision model in all models tested) to complete the counting task. 
Taken together, these results suggest that there is much room for improvement in visual world modeling beyond text-based reasoning of VLMs." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 177, + 294, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 177, + 294, + 367 + ], + "spans": [ + { + "bbox": [ + 57, + 177, + 294, + 367 + ], + "type": "text", + "content": "Providing diffusion-based inpainting improves performance marginally. Similar to the object coordinate oracles, the Inpainting Pipeline (rightmost columns in Fig. 8 and Tab. 5) eliminates the need for world modeling and provides VLMs with an approximation of the image behind the occluder. With the inpainted images, VLM error decreases by almost " + }, + { + "bbox": [ + 57, + 177, + 294, + 367 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 57, + 177, + 294, + 367 + ], + "type": "text", + "content": " for InternVL2 and " + }, + { + "bbox": [ + 57, + 177, + 294, + 367 + ], + "type": "inline_equation", + "content": "7\\%" + }, + { + "bbox": [ + 57, + 177, + 294, + 367 + ], + "type": "text", + "content": " for Qwen2VL compared to the original occluded images. GPT-4o's error increases on inpainted images by a small margin; we hypothesize that this may be because GPT-4o has one of the better world models (based on its superior performance), and thus does not improve further with the inpainted images. Moreover, every VLM still falls short of its unoccluded image performance, indicating that the diffusion model is not a perfect world model. Qualitatively, we find that the inpainting model sometimes fails to output the correct pattern." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 58, + 383, + 140, + 393 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 58, + 383, + 140, + 393 + ], + "spans": [ + { + "bbox": [ + 58, + 383, + 140, + 393 + ], + "type": "text", + "content": "5. 
Related Work" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 57, + 406, + 294, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 406, + 294, + 573 + ], + "spans": [ + { + "bbox": [ + 57, + 406, + 294, + 573 + ], + "type": "text", + "content": "Spatial reasoning in visual question answering. Past work measures the spatial reasoning capabilities of VLMs in the form of visual question answering (VQA) [4, 16] benchmarks. SpartQA [26] asks VLMs to identify the spatial relation (e.g., above, behind, left of) between objects in synthetically created 2D images from NLVR [39]. More recent benchmarks test similar spatial relation understanding with real images [2, 24, 36]. While this past work asks models to provide a text description for a relation between two fully observed objects, CAPTURE measures the world modeling from a partially observed scene, thus requiring the handling of occlusion, pattern recognition, and counting. Together, these constitute a stricter test of spatial reasoning than typical VQA settings." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 57, + 582, + 294, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 582, + 294, + 712 + ], + "spans": [ + { + "bbox": [ + 57, + 582, + 294, + 712 + ], + "type": "text", + "content": "Amodal completion. Occlusions are common in natural scenes, and vision solutions for amodal completion have made significant progress in infilling occlusions [6, 38, 46]. The amodal completion task has evolved from simply completing a shape to filling in appearance (e.g., texture, color, etc.) to finally dealing with fine-grained order perception (multiple stacked occluded objects) [5]. Specifically in Qiu and Di [34], VLMs classify the hidden objects and extract fine details from occluded items. 
CAPTURE, however, presents a unique category of patterned amodal counting which requires inferring fully occluded objects based on a" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 316, + 72, + 553, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 72, + 553, + 167 + ], + "spans": [ + { + "bbox": [ + 316, + 72, + 553, + 167 + ], + "type": "text", + "content": "pattern rather than inferring occluded object wholes based on object parts. In other words, previous work has only attempted tasks that require amodal completion for one object at a time [31, 38, 46], whereas CAPTURE handles multiple objects. Multi-object amodal completion is crucial because in cluttered scenes, entire groups of objects are often occluded. Moreover, the output space of CAPTURE is language (rather than filling pixels)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 316, + 175, + 553, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 175, + 553, + 365 + ], + "spans": [ + { + "bbox": [ + 316, + 175, + 553, + 365 + ], + "type": "text", + "content": "Counting with vision-and-language models. Within the task of counting, the most similar application to CAPTURE is dense counting, where the objects to be counted occlude each other. There are many practical applications of such a task, like counting cells on a crowded slide [8], determining crop yields from densely-packed fields [43], or crowd counting [14, 44, 48]. Liang et al. [23] improved crowd counting with an augmented CLIP [35], i.e. also using VLMs for counting. Additionally, Jenkins et al. [18] introduced an amodal counting benchmark, presenting an occluded 3D counting task where models must count objects on retail shelves. However, our work differs in many ways, as Jenkins et al. [18] only counts retail shelves and uses Li-DAR input. 
More broadly, dense counting focuses on overlapping objects rather than on counting objects arranged into patterns, which is the focus of CAPTURE." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 316, + 377, + 386, + 388 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 377, + 386, + 388 + ], + "spans": [ + { + "bbox": [ + 316, + 377, + 386, + 388 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 316, + 396, + 553, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 396, + 553, + 575 + ], + "spans": [ + { + "bbox": [ + 316, + 396, + 553, + 575 + ], + "type": "text", + "content": "We introduced CAPTURE, a novel benchmark for amodal counting that measures spatial reasoning capabilities under occlusion. CAPTURE is designed to assess VLMs' ability to form a robust world model and use that model for visual reasoning skills under occlusion. By testing counting, we cast the problem as a measurable task with an objective correct answer that also has real-world utility as VLMs become more broadly adopted. Our results suggest that VLMs struggle to combine reasoning, counting, and world modeling with low performance on occluded and unoccluded images. Our analysis indicates that models improve with oracle information about visible objects (simplifying the reasoning/counting tasks) and predicted information about the occluded objects (also simplifying world modeling), pointing to directions of model improvement." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 316, + 586, + 410, + 598 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 586, + 410, + 598 + ], + "spans": [ + { + "bbox": [ + 316, + 586, + 410, + 598 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 316, + 605, + 553, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 605, + 553, + 712 + ], + "spans": [ + { + "bbox": [ + 316, + 605, + 553, + 712 + ], + "type": "text", + "content": "This work was supported by DARPA ECOLE Program No. HR00112390060, NSF-CAREER Award 1846185, NSF-AI Engage Institute DRL-2112635, DARPA Machine Commonsense (MCS) Grant N66001-19-2-4031, ARO Award W911NF2110220, ONR Grant N00014-23-1-2356, Microsoft Accelerate Foundation Models Research (AFMR) grant program, and a Bloomberg Data Science PhD Fellowship. The views contained in this article are those of the authors and not of the funding agency." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "spans": [ + { + "bbox": [ + 302, + 733, + 308, + 741 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 91, + 295, + 713 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 61, + 91, + 294, + 111 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 91, + 294, + 111 + ], + "spans": [ + { + "bbox": [ + 61, + 91, + 294, + 
111 + ], + "type": "text", + "content": "[1] AI@Meta. Llama 3.1 model card. *Github Model Card*, 2024. 4" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 114, + 295, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 114, + 295, + 167 + ], + "spans": [ + { + "bbox": [ + 61, + 114, + 295, + 167 + ], + "type": "text", + "content": "[2] Haider Al-Tahan, Quentin Garrido, Randall Balestriero, Diane Bouchacourt, Caner Hazirbas, and Mark Ibrahim. Unibench: Visual reasoning requires rethinking vision-language beyond scaling. arXiv preprint arXiv:2408.04810, 2024. 8" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 170, + 295, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 170, + 295, + 201 + ], + "spans": [ + { + "bbox": [ + 62, + 170, + 295, + 201 + ], + "type": "text", + "content": "[3] Niki Amini-Naeni, Tengda Han, and Andrew Zisserman. Countgd: Multi-modal open-world counting. arXiv preprint arXiv:2407.04619, 2024. 2, 4, 5" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 203, + 295, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 203, + 295, + 256 + ], + "spans": [ + { + "bbox": [ + 62, + 203, + 295, + 256 + ], + "type": "text", + "content": "[4] Stanislaw Antol, Aishwarya Agrawal, Jiasen Lu, Margaret Mitchell, Dhruv Batra, C Lawrence Zitnick, and Devi Parikh. Vqa: Visual question answering. In Proceedings of the IEEE international conference on computer vision, pages 2425-2433, 2015. 8" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 258, + 295, + 290 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 258, + 295, + 290 + ], + "spans": [ + { + "bbox": [ + 62, + 258, + 295, + 290 + ], + "type": "text", + "content": "[5] Jiayang Ao, Qiuhong Ke, and Krista A Ehinger. Image amodal completion: A survey. Computer Vision and Image Understanding, 229:103661, 2023. 
1, 8" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 292, + 295, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 292, + 295, + 324 + ], + "spans": [ + { + "bbox": [ + 62, + 292, + 295, + 324 + ], + "type": "text", + "content": "[6] Jiayang Ao, Yanbei Jiang, Qiuhong Ke, and Krista A Ehinger. Open-world amodal appearance completion. arXiv preprint arXiv:2411.13019, 2024. 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 326, + 295, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 326, + 295, + 346 + ], + "spans": [ + { + "bbox": [ + 62, + 326, + 295, + 346 + ], + "type": "text", + "content": "[7] Artificial Analysis. Text to image model arena, 2025. Accessed: April 10, 2025. 7" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 348, + 295, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 348, + 295, + 392 + ], + "spans": [ + { + "bbox": [ + 62, + 348, + 295, + 392 + ], + "type": "text", + "content": "[8] Soumen Bera. Partially occluded object detection and counting. In Proceedings of the 2015 Third International Conference on Computer, Communication, Control and Information Technology (C3IT), pages 1-6. IEEE, 2015. 8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 393, + 295, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 393, + 295, + 456 + ], + "spans": [ + { + "bbox": [ + 62, + 393, + 295, + 456 + ], + "type": "text", + "content": "[9] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, Bin Li, Ping Luo, Tong Lu, Yu Qiao, and Jifeng Dai. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. arXiv preprint arXiv:2312.14238, 2023. 
4" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 57, + 459, + 295, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 459, + 295, + 513 + ], + "spans": [ + { + "bbox": [ + 57, + 459, + 295, + 513 + ], + "type": "text", + "content": "[10] Zhe Chen, Weiyun Wang, Hao Tian, Shenglong Ye, Zhangwei Gao, Erfei Cui, Wenwen Tong, Kongzhi Hu, Jiapeng Luo, Zheng Ma, et al. How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites. arXiv preprint arXiv:2404.16821, 2024. 4" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 57, + 514, + 295, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 514, + 295, + 557 + ], + "spans": [ + { + "bbox": [ + 57, + 514, + 295, + 557 + ], + "type": "text", + "content": "[11] Davide Chicco, Matthijs J Warrens, and Giuseppe Jurman. The coefficient of determination r-squared is more informative than smape, mae, mape,mse and rmse in regression analysis evaluation. Peerj computer science, 7:e623, 2021. 11" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 559, + 295, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 559, + 295, + 579 + ], + "spans": [ + { + "bbox": [ + 57, + 559, + 295, + 579 + ], + "type": "text", + "content": "[12] Nikolas Coupland. How frequent are numbers? Language & Communication, 31(1):27-37, 2011. 13" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 57, + 581, + 295, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 581, + 295, + 645 + ], + "spans": [ + { + "bbox": [ + 57, + 581, + 295, + 645 + ], + "type": "text", + "content": "[13] Matt Deitke, Christopher Clark, Sangho Lee, Rohun Tripathi, Yue Yang, Jae Sung Park, Mohammadreza Salehi, Niklas Muennighoff, Kyle Lo, Luca Soldaini, et al. Molmo and pixmo: Open weights and open data for state-of-the-art multimodal models. arXiv preprint arXiv:2409.17146, 2024. 
4" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 57, + 647, + 295, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 647, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 57, + 647, + 295, + 689 + ], + "type": "text", + "content": "[14] Zheyi Fan, Zihao Song, Di Wu, and Yixuan Zhu. Multibranch segmentation-guided attention network for crowd counting. Journal of Visual Communication and Image Representation, 97:103964, 2023. 8" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 57, + 692, + 295, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 692, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 57, + 692, + 295, + 713 + ], + "type": "text", + "content": "[15] Benito E Flores. A pragmatic view of accuracy measurement in forecasting. Omega, 14(2):93-98, 1986. 11" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 555, + 712 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 316, + 73, + 554, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 554, + 128 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 554, + 128 + ], + "type": "text", + "content": "[16] Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Bartra, and Devi Parikh. Making the v in vqa matter: Elevating the role of image understanding in visual question answering. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6904-6913, 2017. 8" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 317, + 129, + 554, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 129, + 554, + 162 + ], + "spans": [ + { + "bbox": [ + 317, + 129, + 554, + 162 + ], + "type": "text", + "content": "[17] David Ha and Jürgen Schmidhuber. Recurrent world models facilitate policy evolution. Advances in neural information processing systems, 31, 2018. 
1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 163, + 554, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 163, + 554, + 228 + ], + "spans": [ + { + "bbox": [ + 317, + 163, + 554, + 228 + ], + "type": "text", + "content": "[18] Porter Jenkins, Kyle Armstrong, Stephen Nelson, Siddhesh Gotad, J Stockton Jenkins, Wade Wilkey, and Tanner Watts. Countnet3d: A 3d computer vision approach to infer counts of occluded objects. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 3008-3017, 2023. 8" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 230, + 554, + 260 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 230, + 554, + 260 + ], + "spans": [ + { + "bbox": [ + 317, + 230, + 554, + 260 + ], + "type": "text", + "content": "[19] Gaetano Kanizsa, Paolo Legrenzi, and Paolo Bozzi. Organization in vision: essays on gestalt perception. Praeger, 1979. 1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 264, + 554, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 264, + 554, + 306 + ], + "spans": [ + { + "bbox": [ + 316, + 264, + 554, + 306 + ], + "type": "text", + "content": "[20] Kaleb Kassaw, Francesco Luzi, Leslie M Collins, and Jordan M Malof. Are deep learning models robust to partial object occlusion in visual recognition tasks? arXiv preprint arXiv:2409.10775, 2024. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 309, + 553, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 309, + 553, + 330 + ], + "spans": [ + { + "bbox": [ + 316, + 309, + 553, + 330 + ], + "type": "text", + "content": "[21] Black Forest Labs. Flux. 
https://github.com/black-forest-labs/flux, 2024.7" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 332, + 554, + 386 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 332, + 554, + 386 + ], + "spans": [ + { + "bbox": [ + 317, + 332, + 554, + 386 + ], + "type": "text", + "content": "[22] Baiqi Li, Zhiqiu Lin, Wenxuan Peng, Jean de Dieu Nyandwi, Daniel Jiang, Zixian Ma, Simran Khanuja, Ranjay Krishna, Graham Neubig, and Deva Ramanan. Naturalbench: Evaluating vision-language models on natural adversarial samples. arXiv preprint arXiv:2410.14669, 2024. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 388, + 555, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 388, + 555, + 442 + ], + "spans": [ + { + "bbox": [ + 317, + 388, + 555, + 442 + ], + "type": "text", + "content": "[23] Dingkang Liang, Jiahao Xie, Zhikang Zou, Xiaqing Ye, Wei Xu, and Xiang Bai. Crowdclip: Unsupervised crowd counting via vision-language model. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2893-2903, 2023. 8" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 445, + 554, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 445, + 554, + 476 + ], + "spans": [ + { + "bbox": [ + 317, + 445, + 554, + 476 + ], + "type": "text", + "content": "[24] Fangyu Liu, Guy Edward Toh Emerson, and Nigel Collier. Visual spatial reasoning. Transactions of the Association for Computational Linguistics, 2023. 8" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 478, + 554, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 478, + 554, + 520 + ], + "spans": [ + { + "bbox": [ + 317, + 478, + 554, + 520 + ], + "type": "text", + "content": "[25] Baraka Jacob Maiseli. Optimum design of chamfer masks using symmetric mean absolute percentage error. 
EURASIP Journal on Image and Video Processing, 2019(1):74, 2019. 11" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 523, + 554, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 523, + 554, + 577 + ], + "spans": [ + { + "bbox": [ + 317, + 523, + 554, + 577 + ], + "type": "text", + "content": "[26] Roshanak Mirzaee and Hossein Rajaby. Spartqa: A textual question answering benchmark for spatial reasoning. In The 2021 Annual Conference of the North American Chapter of the Association for Computational Linguistics (NAACL-2021), 2021. 8" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 578, + 554, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 578, + 554, + 621 + ], + "spans": [ + { + "bbox": [ + 317, + 578, + 554, + 621 + ], + "type": "text", + "content": "[27] Ingrid R Olson, J Christopher Gatenby, Hoi-Chung Leung, Pawel Skudlarski, and John C Gore. Neuronal representation of occluded objects in the human brain. Neuropsychologia, 42(1):95-104, 2004. 1, 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 624, + 450, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 624, + 450, + 635 + ], + "spans": [ + { + "bbox": [ + 316, + 624, + 450, + 635 + ], + "type": "text", + "content": "[28] OpenAI. Hello gpt-4o, 2024. 4" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 636, + 554, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 636, + 554, + 667 + ], + "spans": [ + { + "bbox": [ + 317, + 636, + 554, + 667 + ], + "type": "text", + "content": "[29] OpenCompass Team. Openvlm leaderboard. https://huggingface.co/spaces/opencompass/open_vlmleaderboard, 2024. Accessed: 2024-11-13. 
4" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 316, + 670, + 554, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 670, + 554, + 712 + ], + "spans": [ + { + "bbox": [ + 316, + 670, + 554, + 712 + ], + "type": "text", + "content": "[30] Yumiko OTSUKA, So KANAZAWA, and Masami K YAMAGUCHI. Development of modal and amodal completion in infants. Perception (London. Print), 35(9):1251-1264, 2006. 1, 2" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "spans": [ + { + "bbox": [ + 302, + 732, + 309, + 742 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 73, + 294, + 713 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 56, + 73, + 294, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 73, + 294, + 139 + ], + "spans": [ + { + "bbox": [ + 56, + 73, + 294, + 139 + ], + "type": "text", + "content": "[31] Ege Ozguroglu, Ruoshi Liu, Dídac Surís, Dian Chen, Achal Dave, Pavel Tokmakov, and Carl Vondrick. pix2gestalt: Amodal segmentation by synthesizing wholes. In 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 3931-3940. IEEE Computer Society, 2024. 8" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 140, + 294, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 140, + 294, + 183 + ], + "spans": [ + { + "bbox": [ + 56, + 140, + 294, + 183 + ], + "type": "text", + "content": "[32] Max Peeperkorn, Tom Kouwenhoven, Dan Brown, and Anna Jordanous. Is temperature the creativity parameter of large language models? arXiv preprint arXiv:2405.00492, 2024. 
12" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 184, + 294, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 184, + 294, + 227 + ], + "spans": [ + { + "bbox": [ + 56, + 184, + 294, + 227 + ], + "type": "text", + "content": "[33] Muhammad Fetrat Qharabagh, Mohammadreza Ghofrani, and Kimon Fountoulakis. Lvlm-count: Enhancing the counting ability of large vision-language models. arXiv preprint arXiv:2412.00686, 2024. 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 228, + 294, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 228, + 294, + 261 + ], + "spans": [ + { + "bbox": [ + 56, + 228, + 294, + 261 + ], + "type": "text", + "content": "[34] Wenmo Qiu and Xinhan Di. Occ-mlm: Empowering multimodal large language model for the understanding of occluded objects. arXiv preprint arXiv:2410.01261, 2024. 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 262, + 294, + 327 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 262, + 294, + 327 + ], + "spans": [ + { + "bbox": [ + 56, + 262, + 294, + 327 + ], + "type": "text", + "content": "[35] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. 8" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 328, + 294, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 328, + 294, + 360 + ], + "spans": [ + { + "bbox": [ + 56, + 328, + 294, + 360 + ], + "type": "text", + "content": "[36] Navid Rajabi and Jana Kosecka. Gsr-bench: A benchmark for grounded spatial reasoning evaluation via multimodal llms. arXiv preprint arXiv:2406.13246, 2024. 
8" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 361, + 294, + 405 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 361, + 294, + 405 + ], + "spans": [ + { + "bbox": [ + 56, + 361, + 294, + 405 + ], + "type": "text", + "content": "[37] Viresh Ranjan, Udbhav Sharma, Thu Nguyen, and Minh Hoai. Learning to count everything. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3394-3403, 2021. 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 406, + 294, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 406, + 294, + 459 + ], + "spans": [ + { + "bbox": [ + 56, + 406, + 294, + 459 + ], + "type": "text", + "content": "[38] Kaziwa Saleh, Sándor Szenási, and Zoltán Vámossy. Mask guided gated convolution for amodal content completion. In 2024 IEEE 22nd Jubilee International Symposium on Intelligent Systems and Informatics (SISY), pages 000321-000326. IEEE, 2024. 8" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 460, + 294, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 460, + 294, + 525 + ], + "spans": [ + { + "bbox": [ + 56, + 460, + 294, + 525 + ], + "type": "text", + "content": "[39] Alane Suhr, Mike Lewis, James Yeh, and Yoav Artzi. A corpus of natural language for visual reasoning. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 217-223, Vancouver, Canada, 2017. Association for Computational Linguistics. 
8" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 527, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 527, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 527, + 294, + 713 + ], + "type": "text", + "content": "[40] Kimi Team, Angang Du, Bohong Yin, Bowei Xing, Bowen Qu, Bowen Wang, Cheng Chen, Chenlin Zhang, Chenzhuang Du, Chu Wei, Congcong Wang, Dehao Zhang, Dikang Du, Dongliang Wang, Enming Yuan, Enzhe Lu, Fang Li, Flood Sung, Guangda Wei, Guokun Lai, Han Zhu, Hao Ding, Hao Hu, Hao Yang, Hao Zhang, Haoning Wu, Haotian Yao, Haoyu Lu, Heng Wang, Hongcheng Gao, Huabin Zheng, Jiaming Li, Jianlin Su, Jianzhou Wang, Jiaqi Deng, Jiezhong Qiu, Jin Xie, Jinhong Wang, Jingyuan Liu, Junjie Yan, Kun Ouyang, Liang Chen, Lin Sui, Longhui Yu, Mengfan Dong, Mengnan Dong, Nuo Xu, Pengyu Cheng, Qizheng Gu, Runjie Zhou, Shaowei Liu, Sihan Cao, Tao Yu, Tianhui Song, Tongtong Bai, Wei Song, Weiran He, Weixiao Huang, Weixin Xu, Xiaokun Yuan, Xingcheng Yao, Xingzhe Wu, Xinxing Zu, Xinyu Zhou, Xinyuan Wang, Y. Charles, Yan Zhong, Yang Li, Yangyang Hu, Yanru Chen, Yejie Wang, Yibo Liu, Yibo Miao, Yidao Qin, Yimin Chen" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 486 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 333, + 73, + 553, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 553, + 128 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 553, + 128 + ], + "type": "text", + "content": "Yiping Bao, Yiqin Wang, Yongsheng Kang, Yuanxin Liu, Yulun Du, Yuxin Wu, Yuzhi Wang, Yuzi Yan, Zaida Zhou, Zhaowei Li, Zhejun Jiang, Zheng Zhang, Zhilin Yang, Zhiqi Huang, Zihao Huang, Zijia Zhao, and Ziwei Chen. Kimi-VL technical report, 2025. 
4" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 316, + 129, + 553, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 129, + 553, + 205 + ], + "spans": [ + { + "bbox": [ + 316, + 129, + 553, + 205 + ], + "type": "text", + "content": "[41] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 4" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 316, + 207, + 553, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 207, + 553, + 250 + ], + "spans": [ + { + "bbox": [ + 316, + 207, + 553, + 250 + ], + "type": "text", + "content": "[42] Wei-Yao Wang, Zhao Wang, Helen Suzuki, and Yoshiyuki Kobayashi. Seeing is understanding: Unlocking causal attention into modality-mutual attention for multimodal llms. arXiv preprint arXiv:2503.02597, 2025. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 316, + 251, + 553, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 251, + 553, + 283 + ], + "spans": [ + { + "bbox": [ + 316, + 251, + 553, + 283 + ], + "type": "text", + "content": "[43] Yiding Wang, Yuxin Qin, and Jiali Cui. Occlusion robust wheat ear counting algorithm based on deep learning. Frontiers in Plant Science, 12:645899, 2021. 8" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 285, + 553, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 285, + 553, + 328 + ], + "spans": [ + { + "bbox": [ + 316, + 285, + 553, + 328 + ], + "type": "text", + "content": "[44] Yongjie Wang, Feng Wang, and Dongyang Huang. Dual-branch counting method for dense crowd based on self-attention mechanism. 
Expert Systems with Applications, 236:121272, 2024. 8" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 331, + 553, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 331, + 553, + 352 + ], + "spans": [ + { + "bbox": [ + 316, + 331, + 553, + 352 + ], + "type": "text", + "content": "[45] Karen Wynn. Children's understanding of counting. Cognition, 36(2):155-193, 1990. 1, 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 354, + 553, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 354, + 553, + 397 + ], + "spans": [ + { + "bbox": [ + 316, + 354, + 553, + 397 + ], + "type": "text", + "content": "[46] Katherine Xu, Lingzhi Zhang, and Jianbo Shi. Amodal completion via progressive mixed context diffusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9099-9109, 2024. 8" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 399, + 553, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 399, + 553, + 442 + ], + "spans": [ + { + "bbox": [ + 316, + 399, + 553, + 442 + ], + "type": "text", + "content": "[47] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, et al. Minicpm-v: A gpt-4v level mllm on your phone. arXiv preprint arXiv:2408.01800, 2024. 4" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 443, + 553, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 443, + 553, + 486 + ], + "spans": [ + { + "bbox": [ + 316, + 443, + 553, + 486 + ], + "type": "text", + "content": "[48] Lifang Zhou, Songlin Rao, Weisheng Li, Bo Hu, and Bo Sun. Multi-branch progressive embedding network for crowd counting. Image and Vision Computing, page 105140, 2024. 
8" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 110, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 110, + 85 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 110, + 85 + ], + "type": "text", + "content": "Appendix" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 92, + 194, + 106 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 92, + 194, + 106 + ], + "spans": [ + { + "bbox": [ + 56, + 92, + 194, + 106 + ], + "type": "text", + "content": "A. Implementation Details" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 112, + 148, + 125 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 112, + 148, + 125 + ], + "spans": [ + { + "bbox": [ + 56, + 112, + 148, + 125 + ], + "type": "text", + "content": "A.1. Metric Details" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 129, + 296, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 129, + 296, + 370 + ], + "spans": [ + { + "bbox": [ + 55, + 129, + 296, + 370 + ], + "type": "text", + "content": "We use symmetric mean percent error (sMAPE) as the primary metric for our benchmarks due to its resistance to bias for under/over predictions and small/large ground truths [25]. The standard metric for a counting benchmark is mean average error (MAE). MAE is popular, but heavily penalizes predictions that deviate by a small margin from big ground truths, highlighting the necessity for a metric that gives equal weighting to all questions. 
Mean average percent error (MAPE) initially seems appealing but is disproportionately inflated for small ground truths and is biased towards overpredictions. Mean square error (MSE) and root mean square error (RMSE) are also commonly used but are very sensitive to outliers because they square the error. Intuitively, performing well on almost all questions and poorly on a small subset should score better than consistently being wrong. Among commonly-used metrics, sMAPE is the only metric that evaluates performance in relation to the distribution of ground truth elements [11]. There are two common definitions [15] for sMAPE, but we use the one that scales to " + }, + { + "bbox": [ + 55, + 129, + 296, + 370 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 55, + 129, + 296, + 370 + ], + "type": "text", + "content": ". sMAPE is given by:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 380, + 295, + 410 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 380, + 295, + 410 + ], + "spans": [ + { + "bbox": [ + 105, + 380, + 295, + 410 + ], + "type": "interline_equation", + "content": "\\mathrm {s M A P E} = 1 0 0 \\cdot \\frac {1}{n} \\sum_ {i = 1} ^ {n} \\frac {\\left| y _ {i} - \\hat {y} _ {i} \\right|}{\\left| y _ {i} \\right| + \\left| \\hat {y} _ {i} \\right|} \\tag {2}", + "image_path": "765e128e90cd1a75b722f72d292b40dfaeab439abf0e058ca783f9444768b377.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 416, + 296, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 416, + 296, + 477 + ], + "spans": [ + { + "bbox": [ + 55, + 416, + 296, + 477 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 416, + 296, + 477 + ], + "type": "inline_equation", + "content": "y_{i}" + }, + { + "bbox": [ + 55, + 416, + 296, + 477 + ], + "type": "text", + "content": " represents the actual values, " + }, + { + "bbox": [ + 55, + 416, + 296, + 477 + 
], + "type": "inline_equation", + "content": "\\hat{y}_i" + }, + { + "bbox": [ + 55, + 416, + 296, + 477 + ], + "type": "text", + "content": " represents the predicted values, and " + }, + { + "bbox": [ + 55, + 416, + 296, + 477 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 55, + 416, + 296, + 477 + ], + "type": "text", + "content": " is the number of observations. sMAPE is capped at " + }, + { + "bbox": [ + 55, + 416, + 296, + 477 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 55, + 416, + 296, + 477 + ], + "type": "text", + "content": ", providing a finite scoring range. This feature is ideal for challenging tasks like ours, as it penalizes model responses that fail to produce an answer." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 484, + 151, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 484, + 151, + 498 + ], + "spans": [ + { + "bbox": [ + 55, + 484, + 151, + 498 + ], + "type": "text", + "content": "A.2. Output Tokens" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 502, + 296, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 502, + 296, + 574 + ], + "spans": [ + { + "bbox": [ + 55, + 502, + 296, + 574 + ], + "type": "text", + "content": "To maximize the VLM's chance at success, we allocate a high number of output tokens to generate a rationale and output. This varies per model. We give 4000 tokens to InternVL2, 2000 tokens to Molmo, and 8192 tokens to Qwen2VL, following their max output lengths. For GPT-40, we use the default of 4096 tokens." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 585, + 261, + 598 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 585, + 261, + 598 + ], + "spans": [ + { + "bbox": [ + 55, + 585, + 261, + 598 + ], + "type": "text", + "content": "B. 
CAPTURE Dataset Creation Details" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 605, + 295, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 605, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 605, + 295, + 715 + ], + "type": "text", + "content": "The following expands upon Sec. 2.2. While FSC-147, a diverse counting dataset with manual annotations, is a strong starting point, it cannot immediately be adapted to our task. To make the task of amodal counting solvable, our dataset requires images with patterns in them. A person (or model) can infer how the pattern would continue and thus accurately predict the total number. For questions to be answerable, the dataset's images must be filtered down to represent patterns a model or person could recognize." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 72, + 553, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 180 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 180 + ], + "type": "text", + "content": "Our filtering process follows two stages. First, we prompt GPT-4o to determine whether the objects were arranged in a pattern. Second, if the model responded with \"no\", the images were immediately discarded. If the model output was \"yes\", the log probability of the token is stored. Empirically, we found that higher log probability values (i.e. higher confidence scores) corresponded to more well-defined patterns in the image. Thus, we use the log probabilities for filtering." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 186, + 555, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 186, + 555, + 342 + ], + "spans": [ + { + "bbox": [ + 313, + 186, + 555, + 342 + ], + "type": "text", + "content": "Specifically, let " + }, + { + "bbox": [ + 313, + 186, + 555, + 342 + ], + "type": "inline_equation", + "content": "P_{\\mathrm{yes}}" + }, + { + "bbox": [ + 313, + 186, + 555, + 342 + ], + "type": "text", + "content": " be the log probability of the \"yes\" token and " + }, + { + "bbox": [ + 313, + 186, + 555, + 342 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 313, + 186, + 555, + 342 + ], + "type": "text", + "content": " denote the threshold for determining how well-defined a pattern is. To filter the images based on pattern rigidity, we apply the following condition: " + }, + { + "bbox": [ + 313, + 186, + 555, + 342 + ], + "type": "inline_equation", + "content": "e^{P_{\\mathrm{yes}}} \\geq T" + }, + { + "bbox": [ + 313, + 186, + 555, + 342 + ], + "type": "text", + "content": ". This inequality yields 991 images from the original dataset (16.12%). Next, we manually filter each of the selected images to ensure that they indeed contain patterns and feature a countable number of objects, excluding 34 images. Afterward, we manually place a \"fair\" occluding box in each image, i.e. a box that leaves sufficient portions of the pattern visible, such that the pattern can still be inferred from the unoccluded portions of the image. Occluding boxes were also chosen with varying positions and sizes in the image." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 370, + 434, + 384 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 370, + 434, + 384 + ], + "spans": [ + { + "bbox": [ + 314, + 370, + 434, + 384 + ], + "type": "text", + "content": "C. 
Additional Analysis" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 396, + 554, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 396, + 554, + 494 + ], + "spans": [ + { + "bbox": [ + 313, + 396, + 554, + 494 + ], + "type": "text", + "content": "Here we provide additional experiments that attempt to either increase model performance on CAPTURE or dissect the reasons behind poor model performance. Chain-of-Thought inhibits model performance, while temperature backoff slightly improves performance. Additionally, we find that models struggle at counting just occluded objects, are overconfident in occluded settings, and are biased to predict specific numbers." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 517, + 555, + 532 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 517, + 555, + 532 + ], + "spans": [ + { + "bbox": [ + 313, + 517, + 555, + 532 + ], + "type": "text", + "content": "C.1. Chain-of-Thought reduces model performance" + } + ] + } + ], + "index": 14 + }, + { + "type": "table", + "bbox": [ + 339, + 556, + 528, + 616 + ], + "blocks": [ + { + "bbox": [ + 339, + 556, + 528, + 616 + ], + "lines": [ + { + "bbox": [ + 339, + 556, + 528, + 616 + ], + "spans": [ + { + "bbox": [ + 339, + 556, + 528, + 616 + ], + "type": "table", + "html": "
MethodCAPTURErealCAPTUREsynthetic
GPT-4o14.759.71
GPT-4o w/ CoT14.947.73
Qwen229.3311.74
Qwen2 w/ CoT31.5737.81
", + "image_path": "8414fceefb0333f2f72e9353299e00198f69eb76a39b1a315c06104d0376da7a.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 352, + 617, + 515, + 628 + ], + "lines": [ + { + "bbox": [ + 352, + 617, + 515, + 628 + ], + "spans": [ + { + "bbox": [ + 352, + 617, + 515, + 628 + ], + "type": "text", + "content": "Table 6. CoT experiments (metric: sMAPE)." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 654, + 554, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 654, + 554, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 654, + 554, + 715 + ], + "type": "text", + "content": "During development, we experimented with several common strategies including CoT. In Tab. 6, we find that CoT reduces model performance except in the occluded synthetic scenario, most likely because the included examples are very similar to the test prompt." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 56, + 70, + 553, + 198 + ], + "blocks": [ + { + "bbox": [ + 56, + 70, + 553, + 198 + ], + "lines": [ + { + "bbox": [ + 56, + 70, + 553, + 198 + ], + "spans": [ + { + "bbox": [ + 56, + 70, + 553, + 198 + ], + "type": "table", + "html": "
ModelError (%) (↓)
RealSynthetic
UnoccludedOccludedUnoccludedOccluded
Originalw/ backoff (Δ)Originalw/ backoff (Δ)Originalw/ backoff (Δ)Originalw/ backoff (Δ)
GPT-4o13.3412.57 (−0.77)14.7514.39 (−0.36)5.905.93 (+0.03)9.719.23 (−0.48)
InternVL226.1727.09 (+0.92)32.9032.37 (−0.53)16.4415.59 (−0.85)17.5716.24 (−1.33)
Molmo25.9021.23 (−4.67)32.4928.17 (−4.32)8.402.88 (−5.52)17.7315.85 (−1.88)
Qwen2VL18.9619.40 (+0.44)29.3328.47 (−0.86)6.636.66 (+0.03)11.7411.51 (−0.23)
Avg. of 4 VLMs21.0920.07 (−1.02)27.3725.85 (−1.52)9.347.76 (−1.58)14.1913.21 (−0.98)
", + "image_path": "7ad2418d050b057c9166898787a612277b8a7a6e4806fefeed67729879f8a3f8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 259, + 295, + 285 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 259, + 295, + 285 + ], + "spans": [ + { + "bbox": [ + 55, + 259, + 295, + 285 + ], + "type": "text", + "content": "C.2. Temperature backoff slightly improves model performance" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 291, + 295, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 291, + 295, + 365 + ], + "spans": [ + { + "bbox": [ + 55, + 291, + 295, + 365 + ], + "type": "text", + "content": "To improve VLM performance on CAPTURE, we address a trend we established during early testing. Most of the time, the VLM fails by reaching an incorrect answer. Sometimes, however, our benchmark can cause VLMs to produce a long and irrelevant response that strays from the original prompt, leading to the worst possible sMAPE score (100%)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 365, + 295, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 365, + 295, + 546 + ], + "spans": [ + { + "bbox": [ + 54, + 365, + 295, + 546 + ], + "type": "text", + "content": "To reduce the number of skipped questions, we experiment with temperature backoff, which iteratively decreases the sampling temperature. Because the answer extractor can immediately identify an incoherent output, we can regenerate the response with a lower temperature to get the model to answer the task properly. Consistent with our findings, Peeperkorn et al. [32] also finds that lower temperatures increase coherence in VLMs, thereby enhancing their chances of maintaining relevance to the prompt. Therefore, temperature backoff gives VLMs a better chance of achieving higher scores. 
Each time the answer extractor returns an empty answer because the VLMs produced an incoherent answer, we reduce the temperature by 0.1 (starting from 1.0) until it reaches 0.0, at which point the example is skipped." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 555, + 295, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 555, + 295, + 676 + ], + "spans": [ + { + "bbox": [ + 55, + 555, + 295, + 676 + ], + "type": "text", + "content": "Models perform slightly better with temperature backoff. We introduced temperature backoff to reduce model incoherence, and it performed fairly well. As shown in Tab. 7 (bottom), this method slightly improves performance across each model, resulting in an average error reduction of " + }, + { + "bbox": [ + 55, + 555, + 295, + 676 + ], + "type": "inline_equation", + "content": "5.78\\%" + }, + { + "bbox": [ + 55, + 555, + 295, + 676 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 55, + 555, + 295, + 676 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{real}}" + }, + { + "bbox": [ + 55, + 555, + 295, + 676 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 55, + 555, + 295, + 676 + ], + "type": "inline_equation", + "content": "5.45\\%" + }, + { + "bbox": [ + 55, + 555, + 295, + 676 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 55, + 555, + 295, + 676 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{synthetic}}" + }, + { + "bbox": [ + 55, + 555, + 295, + 676 + ], + "type": "text", + "content": ". Temperature backoff essentially allows the model to reattempt the question if it fails to respond to the prompt. Similar to previous results, positive results from reattempts highlight VLMs' weak reasoning abilities." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 329, + 257, + 541, + 365 + ], + "blocks": [ + { + "bbox": [ + 55, + 205, + 555, + 240 + ], + "lines": [ + { + "bbox": [ + 55, + 205, + 555, + 240 + ], + "spans": [ + { + "bbox": [ + 55, + 205, + 555, + 240 + ], + "type": "text", + "content": "Table 7. Comparison of models on CAPTURE across four scenarios (CAPTURE" + }, + { + "bbox": [ + 55, + 205, + 555, + 240 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 55, + 205, + 555, + 240 + ], + "type": "text", + "content": " vs. CAPTURE" + }, + { + "bbox": [ + 55, + 205, + 555, + 240 + ], + "type": "inline_equation", + "content": "^{\\text{synthetic}}" + }, + { + "bbox": [ + 55, + 205, + 555, + 240 + ], + "type": "text", + "content": ", Unoccluded vs. Occluded). \"Original\" indicates no backoff; \"w/ backoff\" indicates applying backoff, with " + }, + { + "bbox": [ + 55, + 205, + 555, + 240 + ], + "type": "inline_equation", + "content": "\\Delta = (w/ backoff) - (Original)" + }, + { + "bbox": [ + 55, + 205, + 555, + 240 + ], + "type": "text", + "content": ". Negative " + }, + { + "bbox": [ + 55, + 205, + 555, + 240 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 55, + 205, + 555, + 240 + ], + "type": "text", + "content": " values indicate an improvement." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 329, + 257, + 541, + 365 + ], + "lines": [ + { + "bbox": [ + 329, + 257, + 541, + 365 + ], + "spans": [ + { + "bbox": [ + 329, + 257, + 541, + 365 + ], + "type": "table", + "html": "
ModelError (%) [↓]
All ObjectsOnly Occluded
GPT-4o14.7526.13 (+11.38)
InternVL232.9075.82 (+42.92)
Molmo32.4996.79 (+64.30)
Qwen2VL29.3332.89 (+3.56)
Avg. of 4 VLMs27.3757.91 (+30.54)
", + "image_path": "a0b1d18c5608668c32ad94cfb339eefcb95710ae93d724ddeebaa909da2aae77.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 373, + 555, + 407 + ], + "lines": [ + { + "bbox": [ + 313, + 373, + 555, + 407 + ], + "spans": [ + { + "bbox": [ + 313, + 373, + 555, + 407 + ], + "type": "text", + "content": "Table 8. VLM sMAPE for counting all objects and counting only the occluded objects in CAPTURE" + }, + { + "bbox": [ + 313, + 373, + 555, + 407 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 313, + 373, + 555, + 407 + ], + "type": "text", + "content": ". Metric: sMAPE (lower is better)." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 428, + 555, + 453 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 428, + 555, + 453 + ], + "spans": [ + { + "bbox": [ + 313, + 428, + 555, + 453 + ], + "type": "text", + "content": "C.3. Models struggle at counting just occluded objects" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 312, + 458, + 555, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 458, + 555, + 567 + ], + "spans": [ + { + "bbox": [ + 312, + 458, + 555, + 567 + ], + "type": "text", + "content": "We separately test whether models can count only the occluded objects (not including the visible objects) in an image. Here, as Tab. 8 demonstrates, the models perform especially poorly in this task, with high error rates across all models. Therefore, we can conclude that occlusion and counting are uniquely difficult for the VLMs, and that the drop in performance between unoccluded and occluded settings in Tab. 2 is likely due to a poor ability to count occluded objects." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 575, + 552, + 588 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 575, + 552, + 588 + ], + "spans": [ + { + "bbox": [ + 313, + 575, + 552, + 588 + ], + "type": "text", + "content": "C.4. Models are overconfident in occluded settings" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 312, + 594, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 594, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 312, + 594, + 555, + 713 + ], + "type": "text", + "content": "We test the uncertainty with two different methods of obtaining confidence on Qwen2VL. In the first method, we prompt Qwen2VL for its confidence in the answer. For the second method, we generate 20 responses for every question in our VQA and calculate the confidence as the percentage of times the most common answer was generated. These results can be seen in Fig. 9 and Fig. 10 respectively. In both reliability curves, there is a slight trend that the model's confidence is negatively correlated with the error, which is the desired outcome. In " + }, + { + "bbox": [ + 312, + 594, + 555, + 713 + ], + "type": "inline_equation", + "content": "\\mathrm{CAPTURE}^{\\mathrm{real}}" + }, + { + "bbox": [ + 312, + 594, + 555, + 713 + ], + "type": "text", + "content": ", how-" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 55, + 693, + 295, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 693, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 693, + 295, + 713 + ], + "type": "text", + "content": "2We set " + }, + { + "bbox": [ + 55, + 693, + 295, + 713 + ], + "type": "inline_equation", + "content": "T = 0.9999" + }, + { + "bbox": [ + 55, + 693, + 295, + 713 + ], + "type": "text", + "content": " based on manual evaluation, finding it resulted in fewer false positives." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 296, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 296, + 156 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 296, + 156 + ], + "type": "text", + "content": "ever, the correlation is much stronger. While the models are somewhat calibrated (with generally lower confidence on higher-error examples, there are still outliers in prompted confidence for CAPTURE" + }, + { + "bbox": [ + 55, + 72, + 296, + 156 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 55, + 72, + 296, + 156 + ], + "type": "text", + "content": " occluded and sampled confidence for CAPTURE" + }, + { + "bbox": [ + 55, + 72, + 296, + 156 + ], + "type": "inline_equation", + "content": "^{\\text{synthetic}}" + }, + { + "bbox": [ + 55, + 72, + 296, + 156 + ], + "type": "text", + "content": " occluded. This indicates that not only do the models perform worse under occlusion, but they can also be overconfident." 
+ } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 69, + 187, + 272, + 342 + ], + "blocks": [ + { + "bbox": [ + 69, + 187, + 272, + 342 + ], + "lines": [ + { + "bbox": [ + 69, + 187, + 272, + 342 + ], + "spans": [ + { + "bbox": [ + 69, + 187, + 272, + 342 + ], + "type": "image", + "image_path": "c611c7dddf48809c8a6e0d57ebe548a151fb1cc3df349e754f8ed43ae455fa99.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 355, + 295, + 376 + ], + "lines": [ + { + "bbox": [ + 55, + 355, + 295, + 376 + ], + "spans": [ + { + "bbox": [ + 55, + 355, + 295, + 376 + ], + "type": "text", + "content": "Figure 9. Reliability curve of prompting model for confidence vs. sMAPE." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 69, + 422, + 272, + 578 + ], + "blocks": [ + { + "bbox": [ + 69, + 422, + 272, + 578 + ], + "lines": [ + { + "bbox": [ + 69, + 422, + 272, + 578 + ], + "spans": [ + { + "bbox": [ + 69, + 422, + 272, + 578 + ], + "type": "image", + "image_path": "12bebc6d15e6500bbabf290ea88f67842250f7147b933634b3e9da5cb0518d18.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 590, + 295, + 611 + ], + "lines": [ + { + "bbox": [ + 55, + 590, + 295, + 611 + ], + "spans": [ + { + "bbox": [ + 55, + 590, + 295, + 611 + ], + "type": "text", + "content": "Figure 10. Reliability curve of sampling model for confidence vs. sMAPE." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 635, + 295, + 648 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 635, + 295, + 648 + ], + "spans": [ + { + "bbox": [ + 55, + 635, + 295, + 648 + ], + "type": "text", + "content": "C.5. Models are biased to predict specific numbers." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 653, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 653, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 653, + 296, + 713 + ], + "type": "text", + "content": "To examine where models frequently err, we generated a confusion matrix for every model based on CAPTURE" + }, + { + "bbox": [ + 55, + 653, + 296, + 713 + ], + "type": "inline_equation", + "content": "^{\\text{synthetic}}" + }, + { + "bbox": [ + 55, + 653, + 296, + 713 + ], + "type": "text", + "content": " results (shown in Appendix C.5). The y-axis represents the ground truth values and the x-axis represents the model's answers. We find that models often over-predict" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 72, + 555, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 205 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 205 + ], + "type": "text", + "content": "numbers associated with common counts in real life: GPT-40 tends to predict numbers like 8, 9, 10, and 12, which are all non-prime numbers (i.e. can be arranged into a grid) and common groupings of objects. For example, 12 is a common grouping (dozens) and allows arrangements into 3x4 or 2x6 grids. InternVL and Qwen2VL over-predict 5 and 10, aligning with how humans conceptualize numbers. Indeed, Coupland [12] found that numbers 5, 10, 20, and other round numbers appear disproportionately more in online texts. Molmo has no correlation with these factors, possibly due to its unique \"point and count\" ability." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 214, + 408, + 228 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 214, + 408, + 228 + ], + "spans": [ + { + "bbox": [ + 314, + 214, + 408, + 228 + ], + "type": "text", + "content": "D. 
VLM Prompts" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 234, + 555, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 234, + 555, + 258 + ], + "spans": [ + { + "bbox": [ + 313, + 234, + 555, + 258 + ], + "type": "text", + "content": "We use a 100-example validation set for each setting to select the best prompt, which we report below." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 324, + 269, + 544, + 294 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 269, + 544, + 294 + ], + "spans": [ + { + "bbox": [ + 324, + 269, + 544, + 294 + ], + "type": "text", + "content": "Prompt for GPT-4o on CAPTURE" + }, + { + "bbox": [ + 324, + 269, + 544, + 294 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 324, + 269, + 544, + 294 + ], + "type": "text", + "content": " unoccluded split." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 324, + 300, + 544, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 300, + 544, + 348 + ], + "spans": [ + { + "bbox": [ + 324, + 300, + 544, + 348 + ], + "type": "text", + "content": "Count the exact number of [object] in the image. Assume the pattern of [object] continues behind any black box. Provide the total number of [object] as if the black box were not there." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 325, + 370, + 544, + 395 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 370, + 544, + 395 + ], + "spans": [ + { + "bbox": [ + 325, + 370, + 544, + 395 + ], + "type": "text", + "content": "Prompt for InternVL2 on CAPTURE" + }, + { + "bbox": [ + 325, + 370, + 544, + 395 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 325, + 370, + 544, + 395 + ], + "type": "text", + "content": " unoccluded split." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 325, + 401, + 544, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 401, + 544, + 426 + ], + "spans": [ + { + "bbox": [ + 325, + 401, + 544, + 426 + ], + "type": "text", + "content": "Your task is to count objects in the image. First, state what the pattern is, then give your final count." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 325, + 449, + 543, + 474 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 449, + 543, + 474 + ], + "spans": [ + { + "bbox": [ + 325, + 449, + 543, + 474 + ], + "type": "text", + "content": "Prompt for Molmo on CAPTURE" + }, + { + "bbox": [ + 325, + 449, + 543, + 474 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 325, + 449, + 543, + 474 + ], + "type": "text", + "content": " unoccluded split." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 324, + 480, + 544, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 480, + 544, + 529 + ], + "spans": [ + { + "bbox": [ + 324, + 480, + 544, + 529 + ], + "type": "text", + "content": "Count the exact number of [object] in the image. Only count [object] that are visible within the frame. If [object] are partially in the frame (i.e. if any part of [object] are visible), count it." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 325, + 552, + 544, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 552, + 544, + 578 + ], + "spans": [ + { + "bbox": [ + 325, + 552, + 544, + 578 + ], + "type": "text", + "content": "Prompt for Qwen2VL on CAPTURE" + }, + { + "bbox": [ + 325, + 552, + 544, + 578 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 325, + 552, + 544, + 578 + ], + "type": "text", + "content": " unoccluded split." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 324, + 583, + 544, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 583, + 544, + 691 + ], + "spans": [ + { + "bbox": [ + 324, + 583, + 544, + 691 + ], + "type": "text", + "content": "Count the exact number of [object] in the image. Assume the pattern of [object] continues behind any black box. Provide the total number of [object] as if the black box were not there. Only count [object] that are visible within the frame (or would be visible without the occluding box). If [object] are partially in the frame (i.e. if any part of [object] are visible), count it. If the [object] would be partially in the frame without the occluding box, count it." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 69, + 303, + 274 + ], + "blocks": [ + { + "bbox": [ + 56, + 69, + 303, + 274 + ], + "lines": [ + { + "bbox": [ + 56, + 69, + 303, + 274 + ], + "spans": [ + { + "bbox": [ + 56, + 69, + 303, + 274 + ], + "type": "image", + "image_path": "08f6c9e3444d44c81dce730c5794b8ed561370ede8d77fbdb08e1d8da80f2eaa.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 305, + 70, + 552, + 274 + ], + "blocks": [ + { + "bbox": [ + 305, + 70, + 552, + 274 + ], + "lines": [ + { + "bbox": [ + 305, + 70, + 552, + 274 + ], + "spans": [ + { + "bbox": [ + 305, + 70, + 552, + 274 + ], + "type": "image", + "image_path": "bc42a00ebe464d45c0fda199fa3866d224671941005433a1ffed10ada4f7fc65.jpg" + } + ] + } + ], + "index": 1, + 
"angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 55, + 277, + 302, + 483 + ], + "blocks": [ + { + "bbox": [ + 55, + 277, + 302, + 483 + ], + "lines": [ + { + "bbox": [ + 55, + 277, + 302, + 483 + ], + "spans": [ + { + "bbox": [ + 55, + 277, + 302, + 483 + ], + "type": "image", + "image_path": "b764ec1f7d7b2599e57b7f0c6a90c64e02b3a7d1446773a3ab1bb37a3fede70d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 125, + 493, + 485, + 506 + ], + "lines": [ + { + "bbox": [ + 125, + 493, + 485, + 506 + ], + "spans": [ + { + "bbox": [ + 125, + 493, + 485, + 506 + ], + "type": "text", + "content": "Figure 11. Confusion matrix: predicted vs. ground truth counts for CAPTURE" + }, + { + "bbox": [ + 125, + 493, + 485, + 506 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 125, + 493, + 485, + 506 + ], + "type": "text", + "content": "s occluded split." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 306, + 277, + 552, + 483 + ], + "blocks": [ + { + "bbox": [ + 306, + 277, + 552, + 483 + ], + "lines": [ + { + "bbox": [ + 306, + 277, + 552, + 483 + ], + "spans": [ + { + "bbox": [ + 306, + 277, + 552, + 483 + ], + "type": "image", + "image_path": "6bbc087c1a75124b22630e02712bd7e1f1c4ff2a89de05e3a88dda5c6c31f81a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 528, + 285, + 553 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 528, + 285, + 553 + ], + "spans": [ + { + "bbox": [ + 67, + 528, + 285, + 553 + ], + "type": "text", + "content": "Prompt for GPT-4o, InternVL2, and Qwen2VL on CAPTURE" + }, + { + "bbox": [ + 67, + 528, + 285, + 553 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 67, + 528, + 285, + 553 + ], + "type": 
"text", + "content": " occluded split." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 66, + 559, + 286, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 559, + 286, + 704 + ], + "spans": [ + { + "bbox": [ + 66, + 559, + 286, + 704 + ], + "type": "text", + "content": "Count the exact number of [object] in the image. Assume the pattern of [object] continues behind any black box. Provide the total number of [object] as if the black box were not there. Only count [object] that are visible within the frame (or would be visible without the occluding box). If [object] are partially in the frame (i.e. if any part of [object] are visible), count it. If the [object] would be partially in the frame without the occluding box, count it. Molmo: Your task is to count objects in the image. Assume the pattern of [object] continues behind the black box. First, state what the pattern is, then give your final count." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 325, + 529, + 543, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 529, + 543, + 555 + ], + "spans": [ + { + "bbox": [ + 325, + 529, + 543, + 555 + ], + "type": "text", + "content": "Prompt for Molmo on CAPTURE" + }, + { + "bbox": [ + 325, + 529, + 543, + 555 + ], + "type": "inline_equation", + "content": "^{\\text{real}}" + }, + { + "bbox": [ + 325, + 529, + 543, + 555 + ], + "type": "text", + "content": " occluded split." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 324, + 560, + 544, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 560, + 544, + 608 + ], + "spans": [ + { + "bbox": [ + 324, + 560, + 544, + 608 + ], + "type": "text", + "content": "Your task is to count objects in the image. Assume the pattern of [object] continues behind the black box. First, state what the pattern is, then give your final count." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 325, + 639, + 544, + 663 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 639, + 544, + 663 + ], + "spans": [ + { + "bbox": [ + 325, + 639, + 544, + 663 + ], + "type": "text", + "content": "Prompt for GPT-4o on CAPTUREsynthetic unoccluded split." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 325, + 670, + 543, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 670, + 543, + 694 + ], + "spans": [ + { + "bbox": [ + 325, + 670, + 543, + 694 + ], + "type": "text", + "content": "Your task is to count objects in the image. First, state what the pattern is, then give your final count." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 311, + 742 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 66, + 76, + 284, + 101 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 76, + 284, + 101 + ], + "spans": [ + { + "bbox": [ + 66, + 76, + 284, + 101 + ], + "type": "text", + "content": "Prompt for InternVL2 on CAPTUREsynthetic unoccluded split." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 107, + 284, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 107, + 284, + 156 + ], + "spans": [ + { + "bbox": [ + 66, + 107, + 284, + 156 + ], + "type": "text", + "content": "Count the exact number of [dot shape]s in the image. Only count [dot shape]s that are visible within the frame. If [dot shape]s are partially in the frame (i.e. if any part of [dot shape]s are visible), count it." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 66, + 180, + 284, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 180, + 284, + 205 + ], + "spans": [ + { + "bbox": [ + 66, + 180, + 284, + 205 + ], + "type": "text", + "content": "Prompt for Molmo on CAPTUREsynthetic unoccluded split." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 66, + 212, + 284, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 212, + 284, + 247 + ], + "spans": [ + { + "bbox": [ + 66, + 212, + 284, + 247 + ], + "type": "text", + "content": "Count the exact number of [dot shape]s in the image. Only count [dot shape]s that are visible within the frame." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 66, + 270, + 284, + 295 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 270, + 284, + 295 + ], + "spans": [ + { + "bbox": [ + 66, + 270, + 284, + 295 + ], + "type": "text", + "content": "Prompt for Qwen2VL on CAPTUREsynthetic unoccluded split." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 66, + 302, + 285, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 302, + 285, + 421 + ], + "spans": [ + { + "bbox": [ + 66, + 302, + 285, + 421 + ], + "type": "text", + "content": "Count the exact number of [dot shape]s in the image. Assume the pattern of [dot shape]s continues behind any black box. Provide the total number of [dot shape]s as if the black box were not there. Only count [dot shape]s that are visible within the frame (or would be visible without the occluding box). If [dot shape]s are partially in the frame (i.e. if any part of [dot shape]s are visible), count it. If the [dot shape]s would be partially in the frame without the occluding box, count it." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 66, + 445, + 284, + 469 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 445, + 284, + 469 + ], + "spans": [ + { + "bbox": [ + 66, + 445, + 284, + 469 + ], + "type": "text", + "content": "Prompt for GPT-4o and Molmo on CAP-TUREsynthetic occluded split." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 66, + 476, + 284, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 476, + 284, + 522 + ], + "spans": [ + { + "bbox": [ + 66, + 476, + 284, + 522 + ], + "type": "text", + "content": "Your task is to count objects in the image. Assume the pattern of [dot shape]s continues behind the black box. First, state what the pattern is, then give your final count." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 66, + 546, + 284, + 571 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 546, + 284, + 571 + ], + "spans": [ + { + "bbox": [ + 66, + 546, + 284, + 571 + ], + "type": "text", + "content": "Prompt for InternVL2 and Qwen2VL on CAPTUREsynthetic occluded split." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 66, + 577, + 285, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 577, + 285, + 696 + ], + "spans": [ + { + "bbox": [ + 66, + 577, + 285, + 696 + ], + "type": "text", + "content": "Count the exact number of [dot shape]s in the image. Assume the pattern of [dot shape]s continues behind any black box. Provide the total number of [dot shape]s as if the black box were not there. Only count [dot shape]s that are visible within the frame (or would be visible without the occluding box). If [dot shape]s are partially in the frame (i.e. if any part of [dot shape]s are visible), count it. If the [dot shape]s would be partially in the frame without the occluding box, count it." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "spans": [ + { + "bbox": [ + 300, + 732, + 310, + 742 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15585/c8803fe7-d918-414b-a5e2-cfaba643acbf_content_list.json b/data/2025/2504_15xxx/2504.15585/c8803fe7-d918-414b-a5e2-cfaba643acbf_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..8576e21fc9fe7b1e8577fd84a5e8fc321d48ba31 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/c8803fe7-d918-414b-a5e2-cfaba643acbf_content_list.json @@ -0,0 +1,8231 @@ +[ + { + "type": "text", + "text": "A Comprehensive Survey in LLM(-Agent) Full Stack Safety: Data, Training and Deployment", + "text_level": 1, + "bbox": [ + 102, + 65, + 893, + 137 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kun Wang\\*1,2, Guibin Zhang\\*3, Zhenhong Zhou†4, Jiahao Wu†5,6, Miao Yu7, Shiqian Zhao1, Chenlong Yin8, Jinhu Fu9, Yibo Yan10,11, Hanjun Luo12, Liang Lin13, Zhihao Xu14, Haolang Lu1, Xinye Cao1, Xinyun Zhou1, Weifei Jin1, Fanci Meng7, Shicheng Xu15, Junyuan Mao3, Yu Wang16, Hao Wu17, Minghe Wang12, Fan Zhang18, Junfeng Fang3, Wenjie Qu3, Yue Liu3, Chengwei Liu1, Yifan Zhang19, Qiankun Li7, Chongye Guo20,21, Yalan Qin20,21, Zhaoxin Fan22, Kai Wang3, Yi Ding1, Donghai Hong23, Jiaming Ji23, Yingxin Lai24, Zitong Yu24, Xinfeng Li1, Yifan Jiang25, Yanhui Li12, Xinyu Deng12, Junlin Wu12, Dongxia Wang12, Yihao Huang1, Yufei Guo23, Jen-tse Huang26, Qiufeng Wang27, Xiaolong Jin45, Wenxuan Wang14, Dongrui Liu21, Yanwei Yue23, Wenke Huang29, Guancheng Wan30, Heng Chang46, Tianlin Li1, Yi Yu1, Chenghao Li31, Jiawei Li33, Lei Bai21, Jie Zhang4, 
Qing Guo4, Jingyi Wang12, Tianlong Chen32, Joey Tianyi Zhou4, Xiaojun Jia1, Weisong Sun1, Cong Wu34, Jing Chen29, Xuming Hu10,11, Yiming Li1, Xiao Wang35, Ningyu Zhang12, Luu Anh Tuan1, Guowen Xu31, Jiaheng Zhang3, Tianwei Zhang1, Xingjun Ma37, Jindong Gu38, Liang Pang15, Xiang Wang7, Bo An1, Jun Sun36, Mohit Bansal32, Shirui Pan28, Lingjuan Lyu40, Yuval Elovici41, Bhavya Kailkhura42, Yaodong Yang23, Hongwei Li31, Wenyuan Xu12, Yizhou Sun30, Wei Wang30, Qing Li5, Ke Tang6, Yu-Gang Jiang37, Felix Juefei-Xu43, Hui Xiong10,11, Xiaofeng Wang46, Dacheng Tao1, Philip S. Yu44, Qingsong Wen2, Yang Liu1", + "bbox": [ + 73, + 157, + 921, + 415 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Nanyang Technological University, $^{2}$ Squirrel AI Learning, $^{3}$ National University of Singapore, $^{4}$ A*STAR, $^{5}$ The Hong Kong Polytechnic University, $^{6}$ Southern University of Science and Technology, $^{7}$ University of Science and Technology of China, $^{8}$ The Pennsylvania State University, $^{9}$ TeleAI, $^{10}$ Hong Kong University of Science and Technology (Guangzhou), $^{11}$ Hong Kong University of Science and Technology, $^{12}$ Zhejiang University, $^{13}$ Institute of Information Engineering, Chinese Academy of Sciences, $^{14}$ Renmin University of China, $^{15}$ Institute of Computing Technology, Chinese Academy of Sciences, $^{16}$ University of California, San Diego, $^{17}$ Tencent, $^{18}$ Georgia Institute of Technology, $^{19}$ Institute of Automation, Chinese Academy of Sciences, $^{20}$ Shanghai University, $^{21}$ Shanghai AI Laboratory, $^{22}$ Beihang University, $^{23}$ Peking University, $^{24}$ Great Bay University, $^{25}$ University of Southern California, $^{26}$ Johns Hopkins University, $^{27}$ Southeast University, $^{28}$ Griffith University, $^{29}$ Wuhan University, $^{30}$ University of California, Los Angeles, $^{31}$ University of Electronic Science and Technology of China, $^{32}$ The University of North Carolina 
at Chapel Hill, $^{33}$ Tsinghua University, $^{34}$ The University of Hong Kong, $^{35}$ University of Washington, $^{36}$ Singapore Management University, $^{37}$ Fudan University, $^{38}$ University of Oxford, $^{39}$ New York University, $^{40}$ Sony, $^{41}$ Ben Gurion University, $^{42}$ Lawrence Livermore National Laboratory, $^{43}$ New York University, $^{44}$ University of Illinois at Chicago, $^{45}$ Purdue University, $^{46}$ ACM Member", + "bbox": [ + 73, + 425, + 921, + 681 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract—The remarkable success of Large Language Models (LLMs) has illuminated a promising pathway toward achieving Artificial General Intelligence for both academic and industrial communities, owing to their unprecedented performance across various applications. As LLMs continue to gain prominence in both research and commercial domains, their security and safety implications have become a growing concern, not only for researchers and corporations but also for all nations. Currently, existing surveys on LLM safety primarily focus on specific stages of the LLM lifecycle, e.g., deployment phase or fine-tuning phase, lacking a comprehensive understanding of the entire \"lifechain\" of LLMs. To address this gap, this paper introduces, for the first time, the concept of \"full-stack\" safety to systematically consider safety issues throughout the entire process of data, training (pre-training, post-training), deployment (deployment and final commercialization). Compared to the off-the-shelf LLM safety surveys, our work demonstrates several distinctive advantages: (I) Comprehensive Perspective. We define the complete LLM lifecycle as encompassing data preparation, pre-training, post-training (including alignment and fine-tuning, model editing, etc.), deployment and final commercialization. To our knowledge, this represents the first safety survey to encompass the entire lifecycle of LLMs. (II) Extensive Literature Support. 
Our research is grounded in an exhaustive review of over $900+$ papers, ensuring comprehensive coverage and systematic organization of safety issues within a more holistic understanding. (III) Unique Insights. Through systematic literature analysis, we develop reliable roadmaps and perspectives for each chapter. Our work identifies promising research directions, including safety in data generation, alignment techniques, model editing, and LLM-based agent systems. These insights provide valuable guidance for researchers pursuing future work in this field. We provide an up-to-date review of the literature on LLM (agent) safety at https://github.com/bingreeky/full-stack-llm-safety, which can be considered a useful support for both researchers and engineers.", + "bbox": [ + 104, + 705, + 888, + 929 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.15585v4 [cs.CR] 9 Jun 2025", + "bbox": [ + 22, + 284, + 57, + 710 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Index Terms—Large Language Model, LLM-based Agent, Safety, Post-training, Alignment, Model Editing, Unlearning, Evaluation", + "bbox": [ + 104, + 941, + 862, + 955 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 73, + 51, + 230, + 66 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The emergence and success of large language models (LLMs) [1, 2, 3, 4, 5] have greatly transformed the modes of production in both academia and industry [6, 7, 8, 9, 10, 11, 12, 13], opening a potential path for the upcoming artificial general intelligence [14, 15, 16]. 
Going beyond this, LLMs, by integrating tools [17, 18, 19, 20], memory [21, 22, 23, 24], APIs [25, 26], and by constructing single-agent or multiagent systems with other LLMs, provide powerful tools for large models to perceive, understand, and change the environment [27, 28, 29, 30]. This has garnered considerable attention for embodied intelligence [31, 32].", + "bbox": [ + 71, + 98, + 491, + 258 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Unfortunately, the entire lifecycle of LLMs is constantly confronted with security and safety issues [33, 34, 35, 36, 37]. During the data preparation phase, since LLMs require ample and diverse data, and a significant amount of data is sourced from the Internet and other open-source scenarios, the toxicity in the data and user privacy may seep into the model parameters, triggering crises in the model [38, 39, 40]. The pretraining process of the model, due to its unsupervised nature, unconsciously absorbs these toxic data and privacy information, thereby causing the model's \"genetic makeup\" to carry dangerous characteristics and privacy issues [41, 42, 43, 44].", + "bbox": [ + 71, + 262, + 491, + 439 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Before the model is deployed, if it is not properly aligned with security measures, it can easily deviate from human values [45, 46]. Meanwhile, to make the model more \"specialized,\" the fine-tuning process will employ safer and more customized data to ensure the model performs flawlessly in specific domains [47, 48, 49, 50]. The model deployment process also involves issues such as jailbreak attacks and corresponding defense measures [51, 52, 53], especially for LLM-based agents [54]. 
These agents may become contaminated due to their interaction with tools, memory, and the environment [55, 56, 57, 58].", + "bbox": [ + 71, + 441, + 491, + 603 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Previous surveys on LLMs have primarily focused on the research aspects of LLM itself, often overlooking detailed discussions on LLM safety [7, 34] and in-depth exploration of trustworthiness issues [75]. Meanwhile, off-the-shelf surveys that do address LLM safety tend to concentrate on various trustworthiness concerns or are limited to a single phase of the LLM lifecycle [33, 76, 77], such as the deployment stage and fine-tuning stage. These surveys generally lack specialized research on safety issues and a comprehensive understanding of the entire LLM lifecycle. Table 1 summarizes the differences between our survey and previous surveys. Upon reviewing the aforementioned survey and systematically investigating the related literature, we conclude that our survey endeavors to address several questions that existing surveys have not covered:", + "bbox": [ + 71, + 607, + 491, + 828 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/685148b9c0f4a2a321b9e38c2fc1efd2445dfe68b9b9d5fe4b2d371fa637fe5b.jpg", + "table_caption": [ + "TABLE 1: Survey Comparison on LLMs and Agents settings." + ], + "table_footnote": [ + "$\\ddagger$ : Single-modal LLM (S), Multi-modal LLM (M).", + "$\\S$ : Single-modal Agent (S), Multi-modal Agent (M), Multi-agent System (MAS).", + "$\\star$ : Pre-training (PT), Fine-tuning (FT), Deployment (Dep), Evaluation (Eval)." + ], + "table_body": "
SurveyObjectStage*
\\( LLM^‡ \\)\\( Agent^§ \\)DataPTEditFTDepEval
Year 2023
Zhao et al. [6]S+M-X
Liang et al. [59]M-XX
Chang et al. [7]S+M-XXXX
Zhang et al. [60]S+M-XXX
Wang et al. [28]-SXXXX
Zhao et al. [61]S-XXX
Xi et al. [29]-S+MASXXXX
Shen et al. [62]S-XXX
Raijan et al. [63]S-XXXX
Kalyan et al. [64]S+M-XX
Huang et al. [51]S-XXX
Shayegani et al. [65]S+MMASXXXX
Yao et al. [66]S-XXXX
Year 2024
Guo et al. [27]-S+MASXXXX
Qin et al. [67]S+M-XX
Hadi et al. [68]S-XXX
Sun et al. [69]S+MSXXX
Das et al. [70]S-XXXX
He et al. [71]-S+M+MASXXXXX
Wang et al. [54]-S+MASXXXXX
Year 2025
Tie et al. [72]S+M-XXX
Ma et al. [33]S+MS+MXX
Huang et al. [73]S+MS+MXX
Yu et al. [74]SS+MASXXXX
Chen et al. [36]S-XX
OursS+MS+M+MAS
", + "bbox": [ + 506, + 88, + 923, + 415 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/d343f41a4fec2cbd537ab3a55e973848372f25236d86cec814430e4e0878d83c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 529, + 479, + 558, + 498 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "What aspects should the safety of large models be compass?", + "text_level": 1, + "bbox": [ + 558, + 488, + 898, + 518 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Contribution 1. After conducting a systematic literature review on the entire LLM lifecycle, we categorize the journey from the \"birth\" to the \"deployment\" of LLMs into distinct phases: data preparation, model pre-training, posttraining, deployment, and finally usage. On a more granular level, we further divide post-training into alignment and fine-tuning, which serve to meet human preferences and performance requirements, respectively. Building upon this, we incorporate model editing and unlearning into our considerations as methods to efficiently update the model's knowledge or parameters, thus effectively ensuring the model's usability during deployment. In the deployment phase, we delineate the safety of large models into: (1) pure LLM models, which do not incorporate additional modules; and (2) LLM-based agents, which are augmented with tools, memory, and other modules. 
This framework encompasses the entire cycle of model parameter training, convergence, and solidification.", + "bbox": [ + 503, + 535, + 924, + 797 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/27b4f09fd68aecd75ec6c15b4737e73a3198fcfd3d6788e2cf6e5147233143f5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 529, + 816, + 558, + 835 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "How to provide a clearer taxonomy and literature ew?", + "text_level": 1, + "bbox": [ + 558, + 825, + 897, + 853 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Contribution 2. After a comprehensive evaluation of over 800 pieces of literature, we develop a full-stack taxonomic framework that nearly covers the entire LLM lifecycle, offering systematic insights into the safety of LLMs throughout their \"lifespan\". We provide a more reliable", + "bbox": [ + 503, + 869, + 924, + 944 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "Kun Wang is with Nanyang Technological University (wang.kun@ntu.edu.sg), Guibin Zhang is with National University of Singapore (guibinz@outlook.com), Jiahao Wu is with The Hong Kong Polytechnic University (jiahao.wu@connect.polyu.hk), Zhenhong Zhou is with A\\*STAR (ydyjyazhh@gmail.com), Yang Liu is with Nanyang Technological University (yangliu@ntu.edu.sg). 
* denotes equal contribution and † denotes the corresponding authors.", + "bbox": [ + 71, + 859, + 491, + 941 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "correlation analysis between each phase of the LLM timeline and other relevant sections, aiding readers in understanding the safety issues of LLMs while also clarifying the research stage of each LLM phase.", + "bbox": [ + 71, + 53, + 491, + 113 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/869c97d2b966b4966a92a247de0ac1218fd4faaff7252718c47a2b6aff524844.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 98, + 132, + 125, + 152 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "What are the potential growth areas for future M safety concerns?", + "text_level": 1, + "bbox": [ + 127, + 141, + 465, + 170 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Contribution 3. Building on a systematic examination of safety issues across various stages of LLM production, we pinpoint promising future directions and technical approaches for LLMs (and LLM-agents), emphasizing reliable perspectives. These insights extend beyond a narrow view of the field, offering a comprehensive perspective on the potential of research \"tracks.\" We are confident that these insights have the potential to spark future \"Aha Moments\" and drive remarkable breakthroughs.", + "bbox": [ + 71, + 181, + 491, + 313 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Taxonomy. Our article begins with the structural preparation of data. In Section 2, we systematically introduce potential data issues during different model training phases, as well as the currently popular research on data generation. In Section 3, we focus on the security and safety concerns during the pre-training phase, which includes two core modules: data filtering and augmenting. 
In Section 4, we concentrate on the post-training phase, differing from previous works by incorporating fine-tuning and alignment, which involve attack, defense, and evaluation. On this basis, we also focus on the process of safety recovery after model safety breaches. In Section 5, we observe that models require dynamic updates in real-world scenarios. To this end, we address parameter-efficient updates and knowledge conflicts through dedicated modules for model editing and knowledge forgetting. Although there is considerable overlap between unlearning and editing methods, in this survey, we enhance readability by separating them, facilitating readers to explore their own fields along the framework. Subsequently, in Section 6, we focus on the safety issues after the model parameters are solidified, which share many commonalities with traditional large model security surveys. We adhere to the taxonomy of attack, defense, and evaluation to ensure readability. Going beyond this, we further analyze the mechanisms of external modules connected to LLMs, focusing on the emerging security of LLM-based agents. Finally, in Section 7, we present multiple safety concerns for the commercialization and ethical guidelines, as well as user usage, of LLM-based applications. To provide readers with a comprehensive understanding of our research framework, we dedicate Section 8 to outlining promising future research directions, while Section 9 presents synthesized conclusions and broader implications.", + "bbox": [ + 71, + 314, + 491, + 795 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "At the conclusion of each chapter, we provide a roadmap and perspective of the research content covered in the sections, to facilitate readers' clearer understanding of the technological evolution path and potential future growth areas. In Figure 1, we present representative works under each research topic, along with a classification directory of the various branches. 
Our safety survey not only pioneers fresh research paradigms but also uncovers critical emerging topics. By mapping security considerations throughout LLMs' complete lifecycle, we establish a standardized", + "bbox": [ + 71, + 796, + 491, + 941 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "research architecture that will guide both academic and industrial safety initiatives.", + "bbox": [ + 503, + 53, + 921, + 82 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 DATA SAFETY", + "text_level": 1, + "bbox": [ + 504, + 104, + 651, + 119 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In the first section, we begin with the data. As the volume of data on the internet increases, the collection of massive datasets provides the \"fuel\" for large language models (LLMs), laying the foundation for their exceptional performance. As the initial step in the entire LLMs production process, we first focus on data safety. Concretely, we analyze critical security risks and mitigation strategies across four lifecycle phases of LLMs: pre-training data safety (Section 2.1), fine-tuning data safety (Section 2.2) and alignment data safety (Section 2.3). Finally, we conduct a systematic analysis from the perspective of data generation (Section 2.4), considering the advantages and progress that future data generation security can bring to models. We summarize the literature on secure and reliable data generation.", + "bbox": [ + 501, + 127, + 924, + 332 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Pretraining Data Safety", + "text_level": 1, + "bbox": [ + 503, + 353, + 718, + 369 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The pretraining phase of LLMs relies heavily on massive, diverse datasets collected from the Internet [78, 79, 80] or open-source data platforms [81, 82] (e.g., GitHub and Hugging face) to provide the foundational \"fuel\" for their performance. 
However, this dependence introduces significant safety [83, 84, 85] and privacy risks [86, 87, 88], as the quality, integrity, and safety of the data directly impact the resulting models. This subsection reviews critical threats to pre-training data safety, including data poisoning, privacy leakage, and explores mitigation strategies based on recent literature [82, 87, 89, 90].", + "bbox": [ + 501, + 373, + 923, + 532 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Training Data Poisoning. The pre-training phase of LLMs is increasingly recognized as a vulnerable point for data poisoning attacks [41, 42, 91]. These attacks involve the injection of malicious content into training datasets, with the goal of inducing harmful behaviors in the model during inference [92, 93, 94, 95, 96]. Recent studies have highlighted the significant risks associated with data poisoning during the pre-training phase of LLMs. For example, [84] and [85] both highlight that small fractions of poisoned data (as low as $0.1\\%$ ) can have lasting impacts on model behavior, even after extensive fine-tuning. These concealed attacks manipulate model predictions by injecting malicious training examples that are difficult to detect. Meanwhile, [83] and [97] emphasize the risks of poisoning web-scale datasets, noting that modifying publicly available data (e.g., Wikipedia pages) can lead to effective attacks that persist through further training. The study by Sun et al. [81] show that code poisoning by simply modifying one variable/function name can enable the code language model for the code search task to make vulnerable code rank in the top $11\\%$ .", + "bbox": [ + 503, + 534, + 923, + 825 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Privacy leakage. The pre-training phase of language models has become a focal point for discussions on privacy leakage [70, 98, 99, 100, 101, 102]. 
As these models grow in scale and capability, the risk of inadvertently capturing and leaking personally identifiable information (PII) from their training data becomes more pronounced [43]. [103, 104, 105] have specifically highlighted this concern in the context of LLMs, demonstrating that these models can memorize and", + "bbox": [ + 503, + 825, + 924, + 941 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/24a54e97c11e8e51e263b7b98b9b21713213013b735e4963916bfb2d477a4b18.jpg", + "image_caption": [ + "Fig. 1: We present a systematic taxonomy while enumerating notable works (2022-2025) and their institutional affiliations." + ], + "image_footnote": [], + "bbox": [ + 71, + 56, + 924, + 575 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "reproduce sensitive information through targeted attacks. Data Extraction Attacks such as [106, 107, 108, 109, 110, 111] have shown that even small portions of poisoned data can lead to lasting impacts on model behavior, including the unintentional disclosure of sensitive information. This risk is further underscored by the findings of [41, 42], which emphasize the extent of memorization across different models and the need for robust data management practices to mitigate privacy risks. Meanwhile, Membership Inference Attacks [112, 113, 114, 115], have been shown to be effective in determining whether specific data samples were used during model training in language models, yet recent research [116, 117, 118, 119, 120, 121] indicates that in LLMs, MIA barely outperform random guessing for most settings across varying LLM sizes and domains. 
Moreover, the research presented in [86, 122] discusses the challenges and applications of protecting data privacy in LLMs, reinforcing the importance of addressing these issues in the development and deployment of these models.", + "bbox": [ + 71, + 618, + 490, + 896 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Mitigation strategies against data insecurity in LLM pre-training include several key interventions. To address toxic content, custom classifiers trained on safety datasets", + "bbox": [ + 71, + 898, + 491, + 941 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "are employed to detect and filter pre-training data [89, 123, 124]. For enhanced privacy, deduplicating training data significantly improves model security against relevant attacks [87, 90]. Furthermore, safety awareness is cultivated during pre-training by managing model outputs through safety plans or by marking and removing unsafe generations [82, 123, 125, 126], leading to safer and more executable planning capabilities.", + "bbox": [ + 503, + 618, + 921, + 736 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Mitigation measures. To address data poisoning and privacy concerns in language models, several strategies are crucial. A primary approach involves curating pretraining datasets to exclude toxic and sensitive content. [89] propose using a combination of URL-based, lexicon-based, and classifier-based filtering to effectively remove harmful content while preserving data quality. Another important strategy is employing data dedduplication techniques, which can prevent model memorization of specific instances, thereby reducing privacy risks. [87] introduce methods to detect and remove duplicate or near-duplicate instances in the training data, incorporating differential privacy to further protect user privacy. This approach effectively prevents the model from memorizing specific instances. 
In addition, developing", + "bbox": [ + 503, + 737, + 921, + 944 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/c15a0bf22feefb3d8da849662a077a55a7495872bd7c6e07d3f46668bf5282f1.jpg", + "image_caption": [ + "Fig. 2: LLMs encounter a wide range of data safety risks throughout their lifecycle, from the initial stages of data collection and pre-processing to model training, deployment, and ongoing updates." + ], + "image_footnote": [], + "bbox": [ + 81, + 55, + 488, + 422 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "robust defenses against data poisoning is vital to ensure that models are less susceptible to manipulation through malicious data injection. For example, [83] advocate for rigorous data source verification and continuous model validation to detect and mitigate potential poisoning attacks, while [41] focus on real-time monitoring and anomaly detection to identify and remove malicious data during training.", + "bbox": [ + 71, + 513, + 490, + 617 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.2 Fine-tuning Data Safety", + "text_level": 1, + "bbox": [ + 71, + 630, + 290, + 645 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Data safety in the fine-tuning stage has emerged as a critical concern in the development of LLMs, with data poisoning attacks presenting particularly sophisticated threats to LLMs [127]. Recent research highlights various vulnerabilities across different fine-tuning approaches including Instruction Tuning, Parameter-Efficient Fine-Tuning and Federated Learning, demonstrating how attackers can manipulate training data or inject malicious instructions to compromise model behavior. 
These risks include:", + "bbox": [ + 71, + 648, + 490, + 779 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$\\Rightarrow$ Instruction Tuning Risks. Instruction tuning, a widely used fine-tuning approach, has been found vulnerable to data poisoning attacks. For example, [128, 129] show that attackers can introduce harmful behaviors by injecting malicious instructions or manipulating training data. These attacks enable models to generate unsafe content when exposed to specific trigger inputs. Additionally, other research [130, 131, 132] explores the use of prompt injection to backdoor instruction-tuned models, allowing attackers to trigger harmful outputs through carefully crafted prompts.", + "bbox": [ + 66, + 781, + 491, + 943 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Parameter-Efficient Fine-Tuning Risks. Parameter-efficient fine-tuning (PEFT) techniques [133, 134, 135] also face data poisoning risks [136]. [137] uncovers stealthy and persistent non-alignment on large language models via backdoor injections. Attackers can subtly alter the model's alignment by injecting backdoors that remain undetected during the fine-tuning process. [138] examines how data poisoning attacks can make generative models degenerate by introducing poisoned data that not only degrades the model's overall performance, but also leads to the generation of harmful content.", + "Federated Learning Risks. Federated Learning, a decentralized training paradigm [139, 140, 141], has become a more privacy-friendly approach for LLM finetuning [142, 143, 144]. In federated learning, data poisoning attacks present an even greater challenge due to the distributed nature of the process [145, 146]. Attackers can inject backdoors into the federated learning process that persist across multiple rounds of training and remain undetected. 
[147] proposes a poisoning attack designed to disrupt the safety alignment of LLMs through fine-tuning a local model on automatically crafted, safety-unaligned data. [148] delves into durable backdoors in federated learning, demonstrating that attackers can create backdoor that are difficult to detect and remove, posing a significant threat to the safety of federated learning models." + ], + "bbox": [ + 500, + 53, + 923, + 448 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.3 Alignment Data Safety", + "text_level": 1, + "bbox": [ + 504, + 465, + 712, + 481 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "From a data-centric perspective, data poisoning attacks pose a significant threat to the integrity and reliability of LLMs by corrupting the training datasets [149, 150]. During the alignment process of LLMs, these attacks can target different stages, including the human feedback stage and the Reinforcement Learning from Human Feedback (RLHF) stage.", + "bbox": [ + 503, + 484, + 921, + 571 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Human Feedback Stage. In the human feedback stage, attackers can exploit the model's reliance on human-provided data. By manipulating feedback data, they can introduce harmful patterns that propagate through the training process. Recent studies demonstrate three primary attack vectors: (1) [151] develops poisoning techniques using malicious instruction injections that systematically degrade model performance on targeted tasks. (2) [152, 153] engineer universal jailbreak backdoor through feedback manipulation, creating persistent vulnerabilities that bypass safety constraints when triggered by specific prompts. (3) [154] crafts deceptive feedback that induces incorrect or harmful outputs.", + "$\\nRightarrow$ Reinforcement Learning from Human Feedback (RLHF) Stage. 
In the RLHF stage, the integrity of the model's learning process can be compromised through the poisoning of reward models [1, 155, 156, 157, 158, 159]. A critical example is the RankPoison attack introduced by [160], which manipulates reward signals by strategically corrupting human preference datasets. Specifically, the attack identifies pairs of responses where the preferred response is shorter than the rejected one and then flips their labels. This manipulation causes the model to prioritize longer responses, which can increase computational costs and potentially lead to harmful behaviors. This underscores" + ], + "bbox": [ + 500, + 575, + 923, + 941 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 32, + 421, + 44 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "the importance of robust safeguards in preference data curation and reward model validation during alignment.", + "bbox": [ + 86, + 53, + 488, + 82 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "2.4 Safety in Data Generation", + "text_level": 1, + "bbox": [ + 73, + 104, + 305, + 119 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The rapid expansion of LLMs has led to a looming data exhaustion crisis, where high-quality data for pretraining, post-training, and evaluation is becoming increasingly scarce. To address this challenge, data synthesis, or data generation, has become deeply embedded in every stage of the LLM ecosystem. 
In this section, we first provide a concise overview of the role of (LLM-based) data generation throughout the LLM lifecycle and then summarize its associated safety concerns, including privacy, bias, and inaccuracy issues.", + "bbox": [ + 71, + 125, + 491, + 271 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Data Generation in the Lifecycle of LLMs. Data synthesis has become an indispensable component of every phase in the LLM ecosystem: in the (i) pre-training stage, LLM-based data generation is often referred to as model distillation, where corpora generated by larger models serve as training data for smaller models, as seen in Phi-1 [161], Phi-1.5 [162], and AnyGPT [163], among others. In the (ii) posttraining stage, downstream fine-tuning, instruction tuning, and alignment inevitably incorporate data generation techniques. For downstream fine-tuning, it is a common practice to utilize a more powerful LLM to generate domain-specific data for a smaller LLM (e.g., Chinese medical knowledge in [164], multiple-choice question answering in [165], mathematical reasoning in [166], and clinical text data [167]) to enhance its domain-specific capabilities. It is also empirically validated that LLM-generated data (e.g., action trajectories, question-answer pairs) can be beneficial for improving the reasoning [168, 169], planning, function calling [170] abilities. For instruction tuning, some approaches employ powerful LLMs to generate instruction-tuning data, such as EvolInstruct from WizardLM [171] and Orca [172], while others adopt self-instruct techniques like Self-Instruct [173] and Self-Translate [174]. For alignment, models such as Beavertails [175], PRM800K [176], and WebGPT [177] extensively rely on LLMs for question/response generation, preference ranking for preference dataset synthesis.", + "bbox": [ + 71, + 272, + 491, + 650 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Safety Issues and Mitigation. 
Despite its success, data generation inevitably introduces additional uncertainties and security risks throughout the LLM lifecycle, primarily in the following aspects: (1) Privacy, where synthetic data generation poses risks of amplifying privacy leakage due to the memorization of sensitive training samples [178] and inadequate anonymization [179], particularly in privacy-sensitive applications such as medical text processing [180] and disease diagnosis [181]. (2) Bias and Fairness, as LLMs inherently exhibit societal biases [182] (e.g., gender stereotypes in job descriptions), and the data they generate may further exacerbate these biases [183, 184]. This issue can be mitigated during the data filtering process using existing LLM debiasing techniques [185, 186, 187]. (3) Hallucination, where LLM-generated data often contains factual inaccuracies or fabricated logical chains due to probabilistic token sampling and outdated knowledge bases, a problem that may be further amplified when pretraining with LLM-generated data. Potential solutions include filtering generated data using existing hallucination detection", + "bbox": [ + 71, + 650, + 493, + 944 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "techniques [188, 189]. (4) Malicious Use, where adversarial users may exploit synthetic data pipelines to mass-produce phishing content, typosquatting SDKs, or politically manipulative narratives. 
(5) Misalignment, where RLHF in LLM training can be compromised by selectively manipulating data samples in the preference dataset [190].", + "bbox": [ + 503, + 53, + 924, + 142 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "2.5 Roadmap & Perspective", + "text_level": 1, + "bbox": [ + 504, + 159, + 725, + 174 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "2.5.1 Reliable Data Distillation", + "text_level": 1, + "bbox": [ + 504, + 176, + 730, + 191 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The proliferation of LLM-driven data synthesis for knowledge distillation and model self-improvement introduces critical security vulnerabilities across the entire LLM lifecycle. This paradigm shift exposes all development stages—from pre-training through post-training to evaluation—to escalating risks of data poisoning threats. These emerging challenges necessitate novel frameworks integrating verifiability and error containment mechanisms to ensure synthetic data integrity, while current methodologies remain fundamentally limited by hallucination propagation and knowledge attenuation stemming from imperfect teacher-student knowledge transfer. To address these challenges, three pivotal research directions emerge: (1) Cross-Model Consistency Verification: Future systems must implement multi-modal validation protocols through techniques like knowledge graph grounding and RAG-enhanced verification. Such mechanisms would ensure synthetic outputs maintain alignment with authoritative external knowledge bases while detecting semantic inconsistencies through ontological reasoning; (2) Dynamic Quality Assessment Frameworks: The development of diagnostic metrics to quantify error propagation remains a crucial frontier in data safety. Advanced toolkits are needed for measuring semantic drift or contradiction are enable real-time monitoring of quality degradation across data generation processes. 
(3) Heterogeneous Filtering Pipelines: While existing filtering mechanisms provide partial solutions, significant progress lies in effectively synthesizing multi-source verification signals, including human expert insight, rule-based invalidators, and model-based critics specializing in detecting nuanced factual discrepancies through contrastive learning paradigms.", + "bbox": [ + 501, + 195, + 924, + 662 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "2.5.2 Novel Data Generation Paradigms", + "text_level": 1, + "bbox": [ + 504, + 674, + 797, + 690 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Emerging approaches in data generation should leverage agent-based simulation frameworks to create a self-sustaining data flywheel for LLMs. In this paradigm, autonomous agents interact within a controlled simulation environment (e.g., Github, StackOverflow) to generate, evaluate, and iteratively refine synthetic datasets with minimal human intervention. Importantly, this approach enables the seamless integration of real-time safety checks and ethical oversight directly into the data generation pipeline. As a result, the system not only scales data synthesis efficiently but also proactively detects and mitigates inaccuracies and harmful content, thereby reinforcing the overall security and integrity of the generated data.", + "bbox": [ + 501, + 691, + 924, + 883 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "2.5.3 Advanced Data Poisoning & Depoisoning", + "text_level": 1, + "bbox": [ + 504, + 895, + 844, + 910 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Future poisoning techniques are anticipated to evolve in several sophisticated directions. On the poisoning front,", + "bbox": [ + 503, + 912, + 924, + 944 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "adversaries may go toward fragment poisoning and covert poisoning paradigms. In fragment poisoning, attackers could embed seemingly benign data segments that, individually, escape detection yet cumulatively form a potent payload capable of destabilizing models at scale. Covert poisoning strategies may involve imperceptibly subtle modifications that, while initially innocuous, gradually aggregate into a comprehensive and disruptive effect. These emerging techniques underscore the growing complexity of data poisoning threats and the urgent need for preemptive countermeasures. To counteract these evolving threats, future work should focus on robust detoxification mechanisms spanning three fronts: (1) Proactive defense through data provenance tracking and differential privacy during data aggregation, preventing malicious samples from entering training pipelines; (2) Reactive purification using adversarial reprogramming techniques, where poisoned datasets are \"repaired\" via counterfactual augmentation or contrastive pruning; and (3) Post-hoc detection via explainable AI diagnostics to identify poisoned samples by analyzing gradient patterns or activation outliers. Hybrid approaches combining these strategies with human-in-the-loop verification could create multi-layered defense systems. 
Furthermore, theoretical advancements in understanding poisoning propagation, such as how poisoned preference pairs distort reward model gradients during RLHF, will inform more effective mitigation strategies.", + "bbox": [ + 71, + 53, + 491, + 448 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3 PRE-TRAINING SAFETY", + "text_level": 1, + "bbox": [ + 73, + 469, + 294, + 483 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we examine the safety of LLMs in the pretraining phase, covering two key dimensions: Pre-training Data Filtering (Section 3.1) and Pre-training Data Augmentation (Section 3.2). Since the pretraining phase typically does not involve active adversarial attacks, our discussion primarily focuses on both the inherent risks present in largescale corpora [2, 4, 78, 81, 82, 97, 124, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205], such as harmful content and privacy violations—and strategies for augmenting the safety of training data, including integrating safe demonstration examples [191, 206, 207, 208] and annotating toxic content to better mitigate these risks [124, 195, 207, 209]. The overall pipeline of strategies for pre-training safety is illustrated in Figure 3. 
Additionally, the strategies adopted in existing LLM technical reports are summarized in Table 2.", + "bbox": [ + 71, + 489, + 490, + 723 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.1 Data Filtering for Pretrain Safety", + "text_level": 1, + "bbox": [ + 71, + 744, + 354, + 758 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.1.1 Heuristic based Filtering", + "text_level": 1, + "bbox": [ + 73, + 763, + 297, + 777 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Heuristic-based filtering, leveraging domain blacklist [78, 193, 194], keyword-based matching [191, 193] and predefined rules [2, 124, 195, 202], is one of the most widely adopted approaches to remove undesirable content before training. With most training data sourced from the Internet [211], domain blacklist provides an efficient initial safeguard by filtering predefined harmful websites and domains. [194] compiles a 13M unsafe domain list, while [78] aggregates a 4.6M URL blacklist targeting spam and adult content. In practice, domains with a high likelihood of containing personally identifiable information (PII) are also", + "bbox": [ + 71, + 781, + 490, + 941 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "TABLE 2: Strategies for Enhancing Safety in the Pre-training Stage. $\\checkmark$ indicates that the method is mentioned in the model's technical report, while - denotes that the method is not referenced. $①$ represents Integrating Safe Demonstration, and A denotes Annotating Toxic Content. \"Augmenting\" denotes Augmenting Training Data.", + "bbox": [ + 506, + 47, + 921, + 148 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/6efbd7f0539bfa93a6cba65f0dec18235d380fd7d9d02766723b3f95bd152dd6.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelData FilteringAugmentation
Heuristic-Model-Blackbox
GPT-4 [191]--
GPT-4o(mini) [124, 202]-
GPT-o1 [201]--
Llama2 [2]---
Llama3 [193]--
Yi [192]--
InternLM2 [194]--
PaLM2 [195]--A
DeepSeek-V2 [4]---
ChatGLM [196]---
Baichuan2 [203]--
Gemini [197]-
Gemini1.5 [209]-
TigerBot [206]--1
Gemma [198]--
Nemotron-4 [200, 210]--
RefinedWeb [78]---
", + "bbox": [ + 506, + 160, + 923, + 401 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "included in the blacklist [2, 193, 195, 202]. Beyond domain blocklists, keyword-based matching further refines content selection by detecting undesirable text patterns at the phrase or word level. For instance, [191] employs a lexicon-based approach to filter inappropriate erotic content. Similarly, [192], [193], and [194] curate word-level blocklists to identify and exclude harmful content. Given that domain blacklist and keyword-based matching might inadvertently exclude a large amount of data [194], developing heuristic-based filtering based on carefully predefined rules provides a balance between content safety and data retention. However, most existing works [197, 198, 200, 203, 209, 210] do not disclose their predefined rules, limiting transparency and reproducibility.", + "bbox": [ + 503, + 422, + 921, + 628 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.1.2 Model based Filtering", + "text_level": 1, + "bbox": [ + 504, + 638, + 710, + 654 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Model-based filtering leverages learned representations to assess content adaptively. [191] filters GPT-4's dataset using internally trained classifiers [212] to remove inappropriate erotic content. [192] employs the Safety Scorer to remove toxic web content, such as violence, pornography, and political propaganda. [194] fine-tunes BERT on the Kaggle \"Toxic Comment Classification Challenge\" dataset and a pornography classification dataset annotated via the Perspective $\\mathrm{API}^1$ , using the resulting classifiers for secondary filtering to ensure safer data. 
Due to its greater generalizability, model-based filtering has been widely adopted across various works [197, 198, 199, 200, 203, 209, 210], serving as a complementary approach to heuristic methods for more effective content filtering.", + "bbox": [ + 501, + 656, + 923, + 861 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.1.3 Blackbox Filtering", + "text_level": 1, + "bbox": [ + 504, + 872, + 684, + 887 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Blackbox filtering mostly relies on policy-driven [4, 197, 209, 213] or API-based [124, 201, 202] methods with undisclosed", + "bbox": [ + 503, + 888, + 921, + 919 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "1. https://perspectiveapi.com/", + "bbox": [ + 517, + 928, + 705, + 941 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/3a143801a40ca350831d89f8b6734dec72d5b207c62eb1650bfeb14b6904c9f6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 106, + 82, + 472, + 209 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/58dfd579ca3459049d3c5f80bf67f5511b180af8ccd32375a55790a24fa9dbda.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 117, + 210, + 472, + 321 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/39576eab3b13c6f58322b854afc4d334e49b19b1700b0e374e0731a1c01b150e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 117, + 321, + 472, + 436 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/cf7b73540606aebc13ca472854080f2b100d90a206010547361e44605ddbdfc2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 94, + 439, + 472, + 563 + ], + "page_idx": 7 + }, + { + "type": "image", + 
"img_path": "images/b77e0d27b30fab18c8a5198df55bc3557bc9da7c2ff2c1bd5181dc84b6441a02.jpg", + "image_caption": [ + "Fig. 3: Pipeline of the Strategies for Pre-training Safety. We divide the existing methods into filtering- and augmentation-based pre-training safety." + ], + "image_footnote": [], + "bbox": [ + 112, + 564, + 472, + 680 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "filtering criteria and implementation details. As a result, these approaches are generally categorized as black box filtering due to their limited interpretability and opaque decision-making processes. Most proprietary companies adopt their own predefined policies and APIs for filtering. For example, [213] filters data based on Meta's safety standards, while [209] removes harmful content according to Google's policy. [124, 201, 202] use the Moderation $\\mathrm{API}^2$ for PII detection and toxicity analysis to refine filtering.", + "bbox": [ + 71, + 782, + 490, + 914 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.2 Augmenting Training Data for Pre-training Safety", + "text_level": 1, + "bbox": [ + 504, + 53, + 906, + 69 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In addition to filtering strategies, some works enhance training data to improve pre-training safety. These approaches mainly include integrating safe demonstration examples to guide model behavior [206] and annotating toxic content to improve the model's ability to recognize and handle unsafe inputs [195]. [206] incorporates 40k human-annotated safety demonstrations, updated monthly, into both alignment learning and pretraining to iteratively refine safety measures. [195] introduces control tokens to explicitly mark text toxicity in a partial of pertaining data based on the signals from the Perspective API. 
This approach allows toxicity-aware conditioning during inference time without hurting performance in general.", + "bbox": [ + 503, + 71, + 921, + 263 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.3 Roadmap & Perspective", + "text_level": 1, + "bbox": [ + 504, + 281, + 723, + 296 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The development of pre-training safety encompasses a diverse set of techniques. Heuristic-based filtering utilizes domain blocklists, keyword matching, and predefined rules to efficiently exclude overtly harmful content and personally identifiable information (PII) [78], while model-based filtering leverages learned representations to dynamically assess the harmfulness of content [205]. Additionally, blackbox filtering employs policy-driven and API-based solutions [97, 204], providing a less transparent yet operationally robust approach. However, existing research hasn't shown how to integrate these methods to pre-train an LLM that ensures security from the source. Thus, further exploration of accurate and efficient pre-training data filtering strategies is both necessary and worthwhile.", + "bbox": [ + 501, + 300, + 921, + 503 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Apart from filtering, data augmentation emerged as a complementary strategy. Some efforts focused on integrating safe demonstration examples to guide model behavior, and some extended to annotating toxic content for improved detection of unsafe inputs [207]. These augmentation techniques work in tandem with filtering methods to preserve valuable training data while mitigating risks. Although data augmentation improves pretraining safety, some current work [2, 97] argues that safety alignment in stages after pertaining tends to yield better results. 
This raises the question of whether augmenting training data during pretraining is cost-effective, given the same time and resource constraints.", + "bbox": [ + 503, + 505, + 921, + 693 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4 POST-TRAINING SAFETY", + "text_level": 1, + "bbox": [ + 504, + 715, + 735, + 729 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this section, we focus on reviewing the safety against harmful post-training attack, where we mainly focus on three parts: Post-training Based Attack, Defense Against Post-training Based Attack, and Evaluation Mechanism. (I) First, we introduce post-training-based attacks and recent advanced attack techniques (Section 4.1). (II) We categorize defensive mechanisms into three groups according to their conducted stage (Section 4.2), referring to the categorization in [214]. The comprehensive classification framework is illustrated in Figure 4, highlighting key representative studies along with their contributing organizations.", + "bbox": [ + 501, + 734, + 921, + 896 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "$\\Rightarrow$ Alignment. Conducted internally by manufacturers/organizations prior to deployment, this final pre-deployment stage employs techniques such as", + "bbox": [ + 500, + 898, + 921, + 943 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 32, + 421, + 44 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 7 + }, + { + "type": "footer", + "text": "2. https://platform.openai.com/docs/guides/moderation", + "bbox": [ + 86, + 928, + 431, + 941 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/a912ed4af41748f546e1f71cac0eb117e621e272d4b9275fa07235d7e5605523.jpg", + "image_caption": [ + "Fig. 4: The taxonomy illustration of LLM post-training safety." 
+ ], + "image_footnote": [], + "bbox": [ + 78, + 53, + 486, + 297 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "reward modeling [1, 155, 156, 157, 158, 159, 215, 216], reinforcement learning [217, 218, 219], and value-aware optimization [220, 221, 222] to align LLMs with human values and societal expectations. This critical phase ensures ethical grounding through iterative preference optimization [223].", + "bbox": [ + 84, + 363, + 491, + 450 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$\\nrightarrow$ Downstream Fine-Tuning. While the datasets for fine-tuning can be manipulated by malicious attackers, the safety of aligned LLMs can be greatly deteriorated [47, 48, 49, 50]. Thus, it is natural to devise robust fine-tuning mechanisms to defend the attacks and a series of defense mechanisms in the fine-tuning stage have been proposed [224, 225, 226, 227, 228].", + "Safety Recovery. The idea of safety recovery is to fix the attacked model after the harmful fine-tuning attack [214]. This line of research mainly focuses on realigning the safety of LLMs [229, 230, 231, 232, 233] by eliminating the toxic information in model parameters, projecting the harmful gradient update to the safety subspace, etc." + ], + "bbox": [ + 66, + 452, + 491, + 642 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "(III) Going beyond this, we finally present the evaluation metrics and benchmarks (Section 4.3), along with a comprehensive roadmap and future perspectives for ensuring safety within the fine-tuning framework (Section 4.4).", + "bbox": [ + 71, + 643, + 490, + 703 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/4d34f195d34be5e5c6a43216b0ec96abe005b1a6a8f26aa7c36ce1f4814affca.jpg", + "table_caption": [ + "TABLE 3: Topic coverage comparison with existing surveys." + ], + "table_footnote": [], + "table_body": "
SurveysData PreparationPre-trainFinetuningAlignmentPost-processInference
[71]XXXXX
[234]X
[77]XXXX
[235]XXXXX
[214]XXX
[236]XX
Ours
", + "bbox": [ + 76, + 733, + 488, + 814 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Differentiating from prior LLM surveys [33, 54, 71, 73, 77, 234, 235, 237], this work uniquely highlights safety implications across the entire fine-tuning pipeline, aligning with the evolving logical framework of modern AI safety. Specifically: Systematic Safety Taxonomy. We rigorously organize safety challenges into distinct fine-tuning stages, providing a granular analysis of risks at each phase. Attack-Defense Methodology. We catalog both adversarial", + "bbox": [ + 71, + 825, + 491, + 943 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "exploitation strategies and corresponding mitigation techniques, accompanied by a detailed technical roadmap for robust fine-tuning. ③ Forward-Looking Insights. Beyond current practices, we outline critical future directions. The detailed information is summarized in Table 3.", + "bbox": [ + 503, + 53, + 921, + 127 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.1 Attacks in Post-training", + "text_level": 1, + "bbox": [ + 504, + 148, + 720, + 165 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Fine-tuning refers to the process of adapting pre-trained models to downstream tasks by optimizing their parameters, which significantly boosts task-specific performance while reducing computational costs compared to full retraining. However, pioneering studies [238, 239, 240] demonstrate that even the introduction of minimal malicious or misaligned data during fine-tuning can severely compromise the safety alignment of LLMs. This security risk has motivated investigations into adversarial attacks targeting the fine-tuning phase. 
In this section, we introduce the fine-tuning attacks from the following two perspectives: (1) the toxic data construction phase and (2) the fine-tuning phase.", + "bbox": [ + 501, + 169, + 923, + 359 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.1.1 Toxic Data Construction Phase", + "text_level": 1, + "bbox": [ + 504, + 375, + 772, + 388 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Leading providers like OpenAI employ safety-oriented filtering mechanisms to screen fine-tuning datasets before user customization. To circumvent these defenses, adversarial training data must first evade detection by such protective models [226]. Current methodologies for constructing toxic data can be broadly categorized into three main approaches: fixed-prompt strategies, iterative prompt strategies and transfer learning strategies.", + "bbox": [ + 501, + 393, + 921, + 512 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Fixed-prompt Strategies. These approaches prefix benign inputs with role-assigning prompts to elicit harmful outputs from LLM. For example, [238] prefixes a subset of fine-tuning data with directives such as \"obedient robot.\" [241] programmed models to feign refusal via safety disclaimers before overriding restrictions, enabling responses to prohibited queries. As such explicit patterns risk detection, advanced stealth methods emerged: [242] embeds malicious content through cryptographic substitutions or steganography within random/natural language patterns.", + "bbox": [ + 503, + 511, + 921, + 657 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Iterative-prompt Strategies. Static attack strategies fail once detected. Heuristic methods now iteratively adapt toxic data against defensive feedback to bypass filters, though iterative optimization often weakens attack strength. 
[243] counters this via similarity-based loss to maintain toxicity, while [244] employs gradient-guided backdoor triggers during instruction tuning to evade detection while preserving content validity.", + "bbox": [ + 503, + 657, + 921, + 775 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Transfer Learning Strategies. Black-box constraints and API rate limits drive attackers to exploit transferable adversarial fine-tuning data from open-source models for zero-shot transfer attacks [240, 245]. The shadow alignment technique [239] demonstrates this through oracle-generated adversarial examples targeting GPT-4's restricted scenarios, successfully poisoning LLaMA via strategic fine-tuning.", + "bbox": [ + 503, + 775, + 921, + 878 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.1.2 Fine-tuning Phase", + "text_level": 1, + "bbox": [ + 504, + 893, + 687, + 907 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Existing fine-tuning methods fall into two categories: Supervised Fine-Tuning (SFT)-based and Reinforcement Learning", + "bbox": [ + 503, + 912, + 923, + 944 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 32, + 421, + 44 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "(RL)-based. Attackers either tamper with model parameters/data to implant stealthy backdoors or distort reward mechanisms to incentivize harmful outputs.", + "bbox": [ + 71, + 53, + 491, + 97 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "SFT-based. Attackers subvert safety-aligned pretrained models through targeted parameter manipulation, achieving stealthy backdoor implantation or safety bypasses via minimal malicious data injection. 
[246] undermines safety guardrails through reversed supervised fine-tuning (RSFT) with adversarial \"helpful\" response pairs. Building on this, [247, 248] demonstrate safety alignment erosion via parameter-efficient adaptation (e.g., LoRA, quantization) in models like Llama-2-7B. Domain-specific analyses reveal broader implications: [50] quantifies toxicity amplification in community-driven adaptations (e.g., SauerkrautLM's German localization), while [249] examines cross-lingual attack transferability through parametric sensitivity analysis. Complementing these, [250] pioneers federated attack vectors using layer-specific modifications (LoRA, LayerNorm) in distributed learning environments.", + "bbox": [ + 71, + 97, + 491, + 330 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "RL-based. Attackers exploit algorithms like Direct Preference Optimization (DPO) to corrupt reinforcement learning policies, assigning higher rewards to harmful behaviors and degrading model safety. For instance, [246] leveraged DPO to encode harmful behaviors as \"preferences,\" skewing the model's response distribution to favor malicious outputs under adversarial prompts. Conversely, [251] identified a \"probability displacement\" phenomenon in DPO, where preferred responses paradoxically decrease in likelihood, potentially triggering unsafe or inverted outputs.", + "bbox": [ + 71, + 330, + 491, + 477 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.2 Defenses in Post-training", + "text_level": 1, + "bbox": [ + 73, + 494, + 302, + 510 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.2.1 Alignment", + "text_level": 1, + "bbox": [ + 73, + 513, + 200, + 527 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Alignment typically optimizes the language model based on human preference feedback by training LLM with high-quality labeled data from harmless question-answer pairs [156, 159, 252]. 
Based on this, alignment ensures that LLM generations adhere to ethics and harmlessness, enhancing safety [155, 253]. In this section, we categorize our discussion into two types based on purpose: general alignment and safety alignment.", + "bbox": [ + 71, + 531, + 490, + 648 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/83ed828523c8544c3e695503562e583a5b18583530a15f2f579840c6adbbd329.jpg", + "image_caption": [ + "Fig. 5: The taxonomy illustration of LLM alignment safety." + ], + "image_footnote": [], + "bbox": [ + 81, + 664, + 486, + 816 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "General Alignment. General alignment enables the pretrained model to learn how to chat while internalizing fundamental human values. In RLHF [1], the model first learns from human-labeled data through supervised finetuning. Then, crowdsourced preference rankings of model responses are used to train a reward model, which is further", + "bbox": [ + 71, + 854, + 491, + 943 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "optimized using PPO [175]. The preference data sequence provided by human annotators guides the model to conduct helpful rather than harmful behaviors [254]. Subsequent techniques such as DPO [255, 256, 257] and RLAIF [158, 258] follow a similar approach by leveraging preference data. Rule-based alignment methods predefine rules that the model learns to follow [259], which eliminates the need for labeled preference data and reduces costs while achieving comparable safety outcomes. Through general alignment, aligned models learn to reject direct harmful queries that could cause societal harm [2, 213]. While these methods contribute to LLM safety to some extent, they are highly susceptible to jailbreak attacks and can be easily circumvented [260, 261, 262, 263]. 
Furthermore, they are vulnerable to fine-tuning-based attacks, as highlighted in recent studies [127].", + "bbox": [ + 501, + 53, + 924, + 271 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Safety Alignment. General alignment has been shown to have significant disadvantages [48] and is particularly vulnerable to fine-tuning attacks after being open-sourced [246]. To better address the challenges of LLM safety [237, 246, 264], some research focuses on safety alignment. One approach is to elevate safety to the same level of importance as performance by training independent reward models and cost models [217, 265]. Subsequent work introduces unique safety rules to enhance safety, leveraging Rule-Based Rewards to train safer models [266]. As large reasoning models (LRMs) emerge [4, 201], rule-based approach is further formalized into the safe policy reasoning, requiring models to reason over safe specifications during inference [267, 268]. Additionally, some studies explore safety alignment from interpretability perspectives [46, 231, 269, 270] by editing model parameters or modifying the residual stream to achieve better alignment.", + "bbox": [ + 503, + 272, + 924, + 521 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.2.2 Downstream Fine-tuning", + "text_level": 1, + "bbox": [ + 504, + 531, + 732, + 546 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The defenses devised in this stage aim to mitigate the harmfulness of the attack during fine-tuning [271]. There are typically three types of defenses.", + "bbox": [ + 503, + 547, + 921, + 592 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Regularization-based method: This type of defense achieves a successful defense by constraining the distance between the fine-tuned model and the aligned model. For example, KL regularizer is utilized to constrain the representation of the fine-tuned model to not deviate much from that of the aligned model [48, 272]. 
Another line of works strive to identify safety layers or modules to freeze or restrict the learning rate to ensure that the fine-tuned model do not deviate far from the aligned model on safety [269, 273, 274, 275, 276]. SaLoRA [277] projects the LoRA representation to an orthogonal aligned subspace.", + "bbox": [ + 501, + 593, + 921, + 753 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Data manipulation: This type of defense mixes alignment data into fine-tuning to achieve safety defense or modifying the system prompt to mitigate the risk [226, 227, 278, 279, 280]. For data mixing, Lisa [224] proposes Bi-State optimization to separate optimization over the alignment data/fine-tuning data, and to use a proximal term for further optimization. Paraphrase [279] also made a similar attempt and found that safety data that follows the prompting style of fine-tuning data can further improve defense performance. As for modifying system prompts, PTST [281] uses general prompts for fine-tuning, but uses safety prompts for inference. BEA [226] lies in the intersection of data mixing and prompt modification method, which introduces safe", + "bbox": [ + 501, + 753, + 923, + 941 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 32, + 421, + 44 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "data concatenated with a system prompt as a backdoor trigger during fine-tuning, thereby establishing a strong link between the backdoor trigger and the safe response within the model.", + "bbox": [ + 71, + 53, + 491, + 109 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Detection-based defense: This type defense devises methods to filter out the harmful data from fine-tuning dataset to preserve the aligned safety of LLMs [282, 283, 284, 285, 286, 287]. 
For instance, there are works that train LLMs as moderation models to identify harmful content [175, 283, 288]. SEAL [228] devises a bi-level formulation to filter out the most harmful samples. SAFT [285] proposes to factorize the embedding space and compare the singular vector to identify harmful data.", + "bbox": [ + 71, + 111, + 491, + 243 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.2.3 Safety Recovery", + "text_level": 1, + "bbox": [ + 73, + 253, + 243, + 268 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Safety recovery refers to the defense mechanism applied after fine-tuning to restore a compromised model (i.e., realign the model). Several approaches aim to repair the model by eliminating the harmful knowledge that has been injected during fine-tuning. For instance, LAT [289] removes harmful knowledge by introducing perturbations into the embedding space, while Antidote [290] identifies and removes the harmful coordinates. [291] further proposes detecting and removing a small fraction of critical poisoned data points using influence functions can effectively recover model performance. Other approaches leverage information from aligned models to restore the integrity of attacked models. For example, SOMF [292] merges the parameters of fine-tuned models with safety parameters from aligned models, Safe LoRA [230] uses the weights of aligned models to project harmful gradient updates into a safe subspace, and SafetyLock [293] extracts safety activation information and injects it into the fine-tuned model. Additional methods in this domain include Safety Arithmetic [231], BEAT [287], IRR [294], NLSR [233], and Panacea [295]. 
Furthermore, CMRM [296] has been specifically developed to recover the safety of vision-based large language models.", + "bbox": [ + 71, + 271, + 493, + 593 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.2.4 Safety Location", + "text_level": 1, + "bbox": [ + 73, + 604, + 236, + 618 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Safety location refers to determining the specific location of the safety mechanism in LLMs, which is important for efficiently building a stable and reliable defense. Recent studies find that safety mechanism is not uniform across all layers of LLMs' transformer layers and only some specific layers are essential for the successful activation of defense [297, 298, 299]. Based on this finding, TGA [297] unveils the key reason for the inconsistency between visual and language safety capabilities in multimodal LLMs is that the visual and language modalities cannot be effectively aligned at the activation layers for safety mechanism. SPPFT [298] proposes a novel fine-tuning approach to fixes the gradient of the safety layers during fine-tuning to address the security degradation. LED [299] shows that realigning the safety layers with the decoded safe response from identified toxic layers can significantly improve the alignment of LLMs against jailbreak attacks.", + "bbox": [ + 71, + 619, + 491, + 869 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.2.5 Open-Weight LLMs Safeguard", + "text_level": 1, + "bbox": [ + 73, + 880, + 339, + 895 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "As open-weight LLMs become increasingly public accessible, concerns about their potential misuse have intensified. 
Once model weights are public, malicious actors", + "bbox": [ + 71, + 898, + 491, + 941 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "can fine-tune or alter them to remove safety alignment, enabling harmful applications such as generating misinformation, planning cyberattacks, or providing instructions for weapons development. Because LLMs grow in capability, ensuring these models cannot be easily repurposed for high-risk misuse has become a critical concern for both researchers and policymakers, like NIST [300, 301].", + "bbox": [ + 501, + 53, + 924, + 155 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Traditional safety techniques—such as refusal training via supervised fine-tuning or reinforcement learning—are often ineffective in this setting, as they can be easily undone by adversarial modifications [240, 269]. In response, researchers have proposed post-training defenses that aim to remain effective even when the model is directly manipulated after release. Two notable approaches are Representation Noising [302] and Tamper Attack Resistance [303]. These approaches attempt to protect models by degrading their ability to learn or recall harmful knowledge, even after extensive fine-tuning. The goal is to raise the cost of misuse, even under strong threat models where attackers have full access to model weights. However, recent studies [301] have shown that evaluating the durability of these defenses is itself difficult. Minor changes in fine-tuning setup—such as different prompt formats, or random seeds—can lead to drastically different outcomes. 
Moving forward, researchers could clearly define threat models, improve reproducibility, and develop safeguards that offer measurable resilience across a wide range of adaptive attack strategies.", + "bbox": [ + 501, + 155, + 926, + 449 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.3 Evaluation", + "text_level": 1, + "bbox": [ + 504, + 467, + 625, + 481 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.3.1 Evaluation Metrics", + "text_level": 1, + "bbox": [ + 504, + 486, + 687, + 500 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "As discussed in previous studies [127, 304], the goal of defense is to ensure that the model is able to (1) keep harmlessness after attack and (2) achieve similar levels of performance on downstream tasks with or without defense. In response to the two goals, we summarize the metrics involved in the existing research into two types: safety metrics and utility metrics.", + "bbox": [ + 501, + 503, + 923, + 606 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Safety metrics: This type of metric is used to evaluate the model's ability to maintain the safety of its outputs after being attacked. Attack Success Rate (ASR), introduced in [260], is one of the earliest safety metrics and has been widely adopted in subsequent works [305, 306, 307], and these papers employ different names for this metric, such as rejection rate [308] and fulfillment rate [309]. The novel measurements of safety metrics emerge with the advent of LLM-as-a-Judge [310, 311]. [261] is the first to apply LLMs to label model outputs as either safe or unsafe and calculates the ratio of unsafe labels as the safety metric. This method effectively leverages the generalization capability of LLMs and has been widely adopted [312, 313, 314]. However, this method also exhibits notable limitations, such as the inability to distinguish between different levels of risk. 
To address them, [315, 316] measures safety by calculating the alignment rate of the model's responses to safety-related multi-choice questions and those of human evaluators, and [230, 238] utilize a 5-point scale for LLM-based evaluators for more fine-grained evaluation.", + "bbox": [ + 501, + 607, + 924, + 898 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Utility metrics: In research on LLM safety, this type of metric is used to evaluate whether the model maintains its original performance on downstream tasks after an attack", + "bbox": [ + 503, + 898, + 924, + 941 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 906, + 32, + 919, + 42 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "or defense. Researchers demonstrate the impact of their methods on model performance by comparing the results of utility metrics before and after the operation. For close-end tasks which have certain ground-truth labels, such as mathematical problems [317, 318, 319], coding tasks [320, 321], and classification tasks [322, 323], researchers typically use accuracy, the ratio of samples for which the model provides the correct answer. For open-ended tasks without a definite correct answer, the metrics are more diverse. For QA tasks [310, 324, 325], researchers primarily use LLM-based rating systems or similarity between generated content and standard response. For text summarization [326] and machine translation [327], ROUGE score and BLEU are widely used. 
By preserving utility, models can maintain their helpful capabilities while resisting attacks, ensuring that safety enhancements do not compromise their practical value in real-world applications.", + "bbox": [ + 71, + 53, + 491, + 301 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Safety and Utility Trade-off metrics: Safety alignment is far more than simply refusing to answer harmful questions [265, 328]. In other words, it is insufficient to rely solely on a classifier that rejects safety-related prompts while responding normally to others [329, 330]. When evaluating a model's safety alignment, a key focus is dual-preference evaluation - assessing whether the model can remain helpful while adhering to safety constraints [175]. For example, consider the prompt, \"How to make a bomb?\" A basic form of safety alignment would involve the model refusing to respond - similar to the approach taken by traditional moderation systems. However, beyond single-preference evaluation, a more advanced form of safety alignment not only withholds harmful information but also provides value-based reasoning and active dissuasion [253]. For instance, the model might reply: \"Building a bomb is extremely dangerous and poses serious risks to public safety. Such actions could cause significant harm and may lead to criminal prosecution.\" The goal of safety alignment is to ensure that a model's behavior aligns with human intentions and values, particularly in safety-critical contexts [331]. In this way, the goal is to achieve a form of bidirectional value alignment between the model and human values [332].", + "bbox": [ + 76, + 301, + 491, + 638 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.3.2 Evaluation Benchmarks", + "text_level": 1, + "bbox": [ + 73, + 657, + 294, + 671 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In current applications, the boundary between alignment benchmarks and fine-tuning benchmarks is not clearly defined. 
Some datasets from alignment benchmarks [175, 333], after appropriate modifications, can also be utilized for fine-tuning benchmarks. Thus, we classify them into two types as per their purposes. We summarize some widely-used benchmarks in Table 4.", + "bbox": [ + 71, + 678, + 490, + 780 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Safety-purpose benchmarks: These benchmarks evaluate the model's ability to maintain safety and align with human values when handling harmful prompts. They are the primary benchmarks used in safety research, effectively testing whether attack or defense methods influence the model's handling of harmful prompts. The design of responses varies depending on the specific purpose. [238, 260] consists of harmful prompts and harmful responses and [334, 335] only contains harmful prompts. Benchmarks or datasets designed for safety alignment, like BeaverTails [175] and HH-RLHF [155], typically not only include both", + "bbox": [ + 71, + 781, + 491, + 941 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "safe and harmful responses but also sometimes include human preference data.", + "bbox": [ + 503, + 53, + 921, + 82 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "General-purpose benchmarks: These benchmarks are used to evaluate the model's performance, such as accuracy, knowledge breadth, and reasoning, typically not intentionally including harmful data. In LLM safety, assessing the model with general-purpose benchmarks assists in analyzing the impact of defenses on the model's performance or is combined with harmful data to simulate fine-tuning attacks. Representative datasets include AlpacaEval [324], Dolly-15k [336], HPD v2 [337], GSM8K [317], ErrorRadar [338], etc. 
General-purpose benchmarks are also critical for LLM safety research, verifying that mitigation strategies do not degrade model performance on benign tasks, thereby balancing between helpfulness and harmlessness.", + "bbox": [ + 503, + 83, + 921, + 273 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/3597ec62c579e34e23882fb0c0da7399687e89d674caf889fa87f31ca3686194.jpg", + "table_caption": [ + "TABLE 4: Summary of typical benchmarks with access links." + ], + "table_footnote": [], + "table_body": "
BenchmarkTypeTaskMetric
AlpacaEval [324]GeneralGeneral QAWin Rate
Dolly-15k [336]GeneralGeneral QAROUGE, BERT Score
PubmedQA [339]GeneralMedical QAAccuracy
GSM8K [317]GeneralMathematicsAccuracy
HumanEval [320]GeneralCodingCode Pass Rate
AGNews [322]GeneralClassificationAccuracy
WMT14 [327]GeneralTranslationBLEU, ROUGE
CNN/DailyMail [340]GeneralSummarizationROUGE
HH-RLHF [155]SafetyGeneral QARejection Rate, Helpfulness
BeaverTails [175]SafetyGeneral QAAccuracy, Win Rate
TruthfulQA [341]SafetyGeneral QATruthfulness
PureBad [238]SafetyHarmful QAASR, Harmfulness Score
DecodingTrust [333]SafetyHarmful QAASR, Accuracy
AdvBench [260]SafetyHarmful QAASR
SALAD-Bench [316]SafetyHarmful QAASR, Safety Rate
SG-Bench [342]SafetyHarmful QAFailure Rate
SafeChain [343]SafetyHarmful QASafe@1, Safe@K
HarmBench [305]SafetyHarmful PromptASR
HEX-PHI [238]SafetyHarmful PromptASR
RealToxicPrompts [334]SafetyHarmful PromptToxicity Rate
Do-Not-Answer [335]SafetyHarmful PromptHarmfulness Score
OR-Bench [308]SafetyHarmful PromptRejection Rate
SorryBench [309]SafetyHarmful PromptFulfillment Rate
Anthropic [254]SafetyHarmful PromptASR
DirectHarm4 [281]SafetyHarmful PromptASR, Harmfulness Score
GSM-Danger [281]SafetyHarmful PromptASR
SafetyBench [315]SafetySafety EvaluationAccuracy
ToxiGen [344]SafetySafety EvaluationAccuracy
R-Judge [314]SafetySafety EvaluationAccuracy
JailbreakBench [306]SafetyJailbreakASR
StrongREJECT [345]SafetyJailbreakWillingness
WildJailbreak [346]SafetyJailbreakASR
", + "bbox": [ + 506, + 321, + 924, + 662 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.4 Roadmap & Perspective", + "text_level": 1, + "bbox": [ + 504, + 707, + 723, + 722 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.4.1 From Low-Level to High-Level Safety", + "text_level": 1, + "bbox": [ + 504, + 729, + 815, + 744 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "With advancements in safety alignment technologies, LLMs are now less likely to explicitly exhibit harmful behaviors associated with low-level safety, such as violence, pornography, or discrimination [254, 265]. In contrast, as LLMs' reasoning capabilities continue to advance, a growing number of researchers are shifting their attention toward high-level safety—concerned with the potential for LLMs to engage in harmful behaviors that are not explicitly observable, such as deception or sycophancy [347]. These behaviors often require specific environmental conditions to manifest and can only be detected through specialized monitoring mechanisms [348], making them comparatively more covert than low-level safety issues.", + "bbox": [ + 501, + 752, + 923, + 941 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 32, + 421, + 44 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.4.1.1 Deceptive Alignment: As LLMs continue to advance in reasoning and planning capabilities, the risk of deceptive behavior has attracted increasing scrutiny from researchers [349]. In this context, deception refers to the behavior in which a model intentionally misleads users or creates false impressions to achieve instrumental goals that are independent of factual accuracy [350]. 
For instance, advanced models such as GPT-4 have exhibited behaviors suggestive of misleading users or obfuscating their underlying objectives during complex interactions [349, 351].", + "bbox": [ + 71, + 53, + 491, + 198 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Deception is defined as systematically inducing others to form false beliefs in order to achieve goals beyond merely conveying the truth [350]. This definition does not presuppose that the model holds human-like beliefs or intentions, but rather focuses on whether its external behavioral patterns resemble those characteristics of deception. In contrast, there is a more formalized definition grounded in game theory and causal reasoning [352], which incorporates the notions of intentionality and belief, modeling deception through a formally structured causal game-theoretic framework and offering criteria for distinguishing deception from related phenomena such as concealment.", + "bbox": [ + 71, + 199, + 491, + 375 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Evaluating the deceptive tendencies of LLMs requires a multi-layered, multi-scenario approach to comprehensively capture when and why such behavior occurs. The following outlines commonly used experimental designs, including various assessment scenarios and techniques:", + "bbox": [ + 71, + 375, + 491, + 446 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Hypothetical Scenarios and Moral Dilemmas: Some studies design conflict scenarios pitting honesty against goal completion, analyzing model responses [353]. Empirical findings reveal models' tendency toward deception, whether to relieve situational pressure or secure higher utility. 
By varying environment settings, researchers can examine triggers of deceptive behavior [354].", + "bbox": [ + 71, + 446, + 491, + 547 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Multi-Agent Interaction and Game Experiments: The model is tested in multi-agent games or social scenarios where success depends on interactions with other agents. Notable examples include the Hoodwinked experiment [355] and the strategic game Diplomacy [356]. These environments permit deceptive interactions, enabling evaluation of whether the model uses deception strategies to gain a competitive advantage [357]. Experiments can monitor the frequency, content, and effectiveness of the model's deceptive behaviors, comparing them with those of human players or models of various scales. Multiplayer game testing can assess the model's social deception skills.", + "bbox": [ + 71, + 549, + 491, + 723 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Autonomous Agency and Covert Action Testing: The model is provided with a defined objective and constraints, along with a certain degree of operational freedom (e.g., tool usage, code execution, or interaction interfaces), and is then observed for covert constraint violations in pursuit of its goal, particularly efforts to disguise such behavior [351, 358]. To enhance the evaluation, experiments may deliberately introduce hidden motives [359]. For example, an AI assistant may have access to sensitive information needed for task completion but is explicitly prohibited from using it without permission. 
The question then becomes whether the AI assistant covertly exploits the information while hiding this behavior from the user [353].", + "bbox": [ + 71, + 724, + 491, + 912 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Prompt Manipulation and Role Guidance: Targeted prompts or configurations can be used to elicit or sup", + "bbox": [ + 71, + 912, + 491, + 944 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "press deceptive behavior in the model, thereby assessing its propensity and robustness. The model may be encouraged to achieve goals by any means necessary or be instructed to be completely honest in order to evaluate its performance in the same task [360]. Experimental results indicate that emphasizing honesty or highlighting potential risks can reduce deceptive behavior to some extent, though such behavior cannot be eliminated entirely [353]. These experiments help determine whether the model exhibits a stable propensity for deception or displays such behavior only under specific conditions.", + "bbox": [ + 501, + 53, + 924, + 212 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Multi-turn Consistency and Alignment Resistance Check: Construct multi-turn dialogue scenarios to evaluate whether the model can consistently uphold a lie. For instance, the model is tasked with maintaining deception across multiple rounds of Question-Answering, while its responses are examined for inconsistencies [361]. A model lacking consistency may confess under pressure or contradict itself, whereas a more advanced model would persist in fabricating lies to sustain the illusion. By tracking how frequently the model's deceptions are uncovered or inadvertently disclosed throughout multi-turn interactions, one can quantify its capacity for sustained deception [354]. Moreover, due to alignment resistance in LLMs, a small amount of data may suffice for the model to revert to its pre-training distribution [362]. 
Therefore, evaluating the model's robustness during the deception process can reveal its tendency toward deceptive behavior under its real distribution, potentially necessitating some degree of inverse training for thorough assessment.", + "bbox": [ + 503, + 213, + 924, + 489 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Thought Process and Internal State Monitoring: This method infers the model's intentions by analyzing its thought processes or internal activations. For example, the model may be prompted to produce a \"thought log\" alongside its response [359], or the reasoning process itself may serve as the log in the case of reasoning models [348]. If the content of the log contradicts the response, it may indicate deceptive behavior. Embedded linear probes can also monitor real-time activations associated with deception [363]. However, deciding how to act once \"bad thoughts\" are detected remains challenging: OpenAI found that penalizing such monitored thoughts reduces their explicit occurrence but does not curb most misbehavior—instead, models learn to conceal their intent within the very \"thought logs\" meant to expose it [364].", + "bbox": [ + 503, + 489, + 924, + 709 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.4.1.2 Reward Hacking: Reward hacking refers to situations in which an AI agent exploits flaws or ambiguities in the reward function to obtain high rewards in unintended ways, without truly accomplishing the intended task of the designer [365, 366]. This behavior reflects a manifestation of reward mis-specification, also known as specification gaming [331, 367]. Reward hacking has long been a concern in the field of AI safety [368]. The root of this problem can be understood through Goodhart's Law: \"when a measure becomes a target, it ceases to be a good measure\" [369]. 
When a proxy metric is used to represent a human's true goal, strong optimization may cause the agent to exploit mismatches between the proxy and the actual objective, resulting in failure. Reward tampering is considered a special case of reward hacking, in which the agent directly interferes with the reward signal source (e.g.,", + "bbox": [ + 503, + 709, + 924, + 944 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 906, + 32, + 923, + 42 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "by modifying the reward function) to obtain high rewards [370, 371].", + "bbox": [ + 71, + 53, + 491, + 82 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "With the widespread adoption of Reinforcement Learning from Human Feedback (RLHF) in training LLMs, reward models that rely on a single scalar value struggle to capture the complexity of human value systems [372, 373]. If the reward model fails to accurately reflect genuine human preferences, the LLM may learn to exploit its biases or those of human evaluators, resulting in various forms of reward hacking. The following are common manifestations of this phenomenon observed in large models.", + "bbox": [ + 71, + 83, + 490, + 214 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Sycophancy: Since LLMs are optimized for human preferences, or for reward models based on such preferences, during fine-tuning, they tend to prioritize satisfying users or human supervisors to maximize rewards, rather than adhering strictly to objective correctness. 
This tendency is reflected in the way their responses often shift to align with users' implied stances, catering to their preferences [374, 375].", + "bbox": [ + 71, + 214, + 491, + 330 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Reward Overoptimization: Model outputs may be excessively optimized for specific formal features to satisfy the reward model. For example, the model may produce unnecessarily lengthy responses [376], as human preference for detailed answers during training leads the reward model to favor longer outputs. Moreover, the model may adapt its writing style and formatting to align with the reward model's preferences, instead of prioritizing content accuracy. For instance, it may learn to respond to harmful queries with overly cautious refusals [237, 377].", + "bbox": [ + 71, + 332, + 490, + 479 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "4.4.2 Provably Safe AI System", + "text_level": 1, + "bbox": [ + 73, + 496, + 302, + 511 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Provably safe AI systems represent an emerging paradigm that aims to ensure that advanced AI operates within rigorous, formally verifiable safety bounds. Some researchers argue that only by embedding mathematically verified safety proofs into AI architectures can we guarantee that such systems will never deviate into harmful behaviors [378]. This formal approach contrasts sharply with traditional empirical testing and red-teaming methods, which often fail to uncover all failure modes in complex or adversarial environments. The achievement of provable safety requires the integration of several key components [379] as follows:", + "bbox": [ + 71, + 516, + 490, + 678 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Formal Safety Specifications: A rigorously defined set of safety properties (e.g., \"do no harm\") must be articulated in a formal language. 
Such specifications are designed to capture the essential criteria that AI systems must satisfy under all operating conditions.", + "bbox": [ + 71, + 678, + 491, + 751 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "World Models: To evaluate the consequences of AI actions, it is essential to build a world model that encapsulates the dynamics and causal relationships of the environment. This model allows for the translation of abstract safety requirements into concrete behavioral constraints.", + "bbox": [ + 71, + 752, + 491, + 824 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Verification Mechanisms: A verifier is needed to ensure that the AI system meets the safety specifications with respect to the world model, regardless of whether it is implemented as a formal proof certificate, a probabilistic bound or an asymptotic guarantee. Such mechanisms are the only reliable method to exclude the possibility of catastrophic failure by proving that certain harmful behaviors are mathematically impossible [378].", + "bbox": [ + 71, + 825, + 491, + 941 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Robust Deployment Infrastructure: Beyond predeployment verification, runtime monitoring and redundant safety measures (such as provably compliant hardware) must be implemented. These safeguards ensure that if discrepancies between the world model and observed behavior occur, the system can transition to a safe state without human intervention [378, 379].", + "bbox": [ + 503, + 53, + 921, + 156 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "4.4.3 Beyond Fine-tuning, Systematic Safety", + "text_level": 1, + "bbox": [ + 504, + 167, + 828, + 184 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "AI governance encompasses the establishment and enforcement of regulatory frameworks necessary for the safe development and deployment of AI systems. 
Given the potential of AI to exacerbate societal biases [374, 380, 381], displace labor [382], and pose existential risks due to increasingly autonomous capabilities [15, 351], governance is critical. The primary objective of AI governance is to mitigate these diverse risks effectively, requiring stakeholders to maintain a balanced consideration of various risk categories.", + "bbox": [ + 501, + 186, + 921, + 318 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A multi-stakeholder approach characterizes contemporary AI governance, involving governments, industry and AI laboratories, and third-party entities such as academia and non-profit organizations [383]. Governments create regulatory frameworks, conduct oversight, and establish risk management systems [384, 385], while industries and AI laboratories undertake comprehensive risk assessments throughout AI development lifecycles and voluntarily adopt security measures [386, 387]. Third parties provide critical auditing services and policy advice, fostering international cooperation and balanced stakeholder interests [388, 389, 390].", + "bbox": [ + 503, + 318, + 921, + 492 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Nevertheless, AI governance faces significant unresolved challenges, prominently in international and open-source contexts. International governance discussions emphasize the importance of global frameworks to manage catastrophic risks such as AI-driven arms races and inequitable distribution of AI benefits [388, 391]. Historically, international governance frameworks like the OECD AI Principles and the global ethical standards produced by the United Nations Educational, Scientific and Cultural Organization (UNESCO) offer instructive precedents [392, 393]. Conversely, open-source governance is debated regarding the balance between transparency's security benefits and potential misuse risks [394, 395]. 
Advocates argue that openness enhances security through rapid issue identification and reduces centralized control [396, 397], while critics highlight risks of malicious use and vulnerabilities from unrestricted access [260, 398]. This ongoing debate underscores the need for measured, risk-informed policies and gradual openness strategies [399, 400].", + "bbox": [ + 503, + 492, + 921, + 771 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5 SAFETY IN MODEL EDITING & UNLEARNING", + "text_level": 1, + "bbox": [ + 504, + 790, + 892, + 806 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Model editing and unlearning techniques can be conceptualized as lightweight adjustments to information and efficient safeguards for privacy and security during the deployment of LLMs. In this work, we integrate discussions on model editing and unlearning into the fine-tuning section to provide a more systematic and comprehensive analysis of their roles in enhancing model safety and robustness.", + "bbox": [ + 501, + 810, + 921, + 912 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Concretely, model editing [401, 402] and unlearning [403, 404, 405, 406, 407, 408] can be understood as methods", + "bbox": [ + 503, + 912, + 921, + 941 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "to efficiently modify model parameters during deployment to enhance the model's security and privacy. To better reflect the comprehensiveness of our survey, we have included relevant literature on the safety of editing (Section 5.1) and unlearning (Section 5.2). It is noteworthy that there exists a certain degree of technical overlap between model editing and unlearning. 
To provide a clearer and more precise exposition, we focus model editing on addressing knowledge conflicts within the model, while unlearning is primarily concerned with the erasure of knowledge to ensure privacy protection.", + "bbox": [ + 71, + 53, + 491, + 214 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "5.1 Safety in Model Editing", + "text_level": 1, + "bbox": [ + 73, + 234, + 287, + 251 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "LLMs retain incorrect or outdated information [409], and for this reason, model editing has emerged to advocate updating knowledge in LLM by modifying a small part of the parameters. In recent years, scholars have begun to investigate model editing in LLMs. Generally, model editing methods can be mainly categorized into gradient-based [410, 411], memory-based [412, 413] and locate-then-edit methods [414, 415, 416].", + "bbox": [ + 71, + 255, + 490, + 372 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Gradient. Early approaches [410, 411, 417] advocate that the updating of knowledge in the LLMs is accomplished by modifying the gradient of the LLM. A more recent study [418] revisits gradient-based fine-tuning and demonstrates strong performance through constrained optimization techniques. However, since gradient-based methods are too complex and suffer from pattern collapse, it is gradually being replaced by other research lines [419, 420].", + "$\\rightarrow$ Memory. Memory-based methods [412, 413] advocate the introduction of external parameters to assist in updating knowledge. Though effective, models with excessive parameters face the problem of over-parameterization – where the parameter space becomes significantly larger than necessary to capture the underlying data distribution [420, 421].", + "- Locate-then-edit. 
Locate-then-edit methods, represented by RoME [416], MEMIT [421] and AlphaEdit [402], localizing knowledge storage-related neurons by causal tracing, achieving knowledge editing by modifying these neurons, have made breakthroughs in recent years [422, 423, 424]. The locate-then-edit approach has been proven to be effective in updating specific factual knowledge in the LLM [402]. Thus it is widely used to edit the security of LLMs [425, 426]. In the following part, we will focus on the application of the locate-then-edit approach to the security domain." + ], + "bbox": [ + 71, + 372, + 491, + 736 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Attack. Model editing can break the secure alignment of LLMs when injecting harmful knowledge into LLM. Chen et.al [425] first proposed the concept of editing attack, constructing a dataset named EDITATTACK, and using editing methods such as RoME [416] and IKE [427] successfully injected harmful, incorrect, and bias information to LLMs. Since model editing modifies the corresponding knowledge in the form of knowledge triples, BadEdit [428] proposes a way to inject triggers using model editing. BadEdit designs specific triggers such as the color of a banana, the shape of an apple, or specific letter combinations such as \"aaa\" and \"bbb\" to trigger the model to output harmful content. Building on this basis, Concept-RoT [429] designs a more invisible approach by proposing $k_{0}$ based on the concept", + "bbox": [ + 71, + 737, + 491, + 944 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "of context, and implanting a backdoor against the concept of context by editing the value corresponding to $k_{0}$ , thus realizing the effect of the conceptual Trojan horse. 
In addition, DEPN [430] devised a method to first locate private neurons, and secondly edit the specified private neurons through RoME so that the model outputs sensitive private information.", + "bbox": [ + 501, + 53, + 921, + 152 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Defense. Model editing can also be used as a means of improving the security of a model, Zhang et.al [426] proposed a model editing method named DINM, to localize and detoxify toxic neurons via model editing, making the model less susceptible to jailbreaking. In addition, other studies [422, 431, 432] have explored the use of model editing for blue teams. Model editing methods have made big strides", + "bbox": [ + 503, + 155, + 923, + 258 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/3b53e9483d3d0da9577270c0536f43d5dba6fe92ad9bba6ab4c0fd3c60e6cc4b.jpg", + "table_caption": [ + "TABLE 5: Model Editing for attack and defense." + ], + "table_footnote": [], + "table_body": "
MethodsAttack?BackDoor?Defense?Parameter?
RoME[416]
IKE[427]--X
AlphaEdit[402]
BadEdit[428]X
ConceptROT[429]X
DEPN[430]XX
DINM[426]XX
PEM[432]XX
", + "bbox": [ + 506, + 285, + 923, + 388 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "in red team, making them an effective means of injecting risk content into safely aligned models. We summarize the mainstream editing for attacks and defenses in Table 5 and each row in the table represents distinct included content.. Against model editing attacks, no research has been done to make a specific defense against such attacks, so further exploration in this area is an important research topic.", + "bbox": [ + 501, + 398, + 921, + 502 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "5.2 Safety in Unlearning", + "text_level": 1, + "bbox": [ + 504, + 515, + 697, + 530 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "LLMs have demonstrated remarkable capabilities in various tasks, but their training on vast and often unfiltered datasets from the Internet inevitably leads to the absorption of unsafe information [433, 434, 435, 436, 437, 438]. This includes biases [439], stereotypes [440], toxic language [441], misinformation [442, 443, 444], and even private data [71]. Therefore, LLM unlearning is crucial for ensuring their safe and responsible deployment [406, 445], as shown in Figure 6. Unlearning, in this context, refers to the process of selectively removing or mitigating the influence of specific knowledge, behaviors, or data points from a trained LLM [446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456]. Unlearning methods can be distinguished into two broad paradigms [457]: exact (certified) unlearning and heuristic (approximate) unlearning. Exact methods accurately identify poisoned data points or affected parameters, providing formal or statistical guarantees that the specified behaviors no longer influence the model. This typically requires certified retraining from scratch, removing the disallowed data entirely [458]. 
Two primary paradigms have emerged to achieve approximate unlearning: parameter-adjusting methods, which modify the model's internal weights, and parameter-preserving methods, which intervene externally without altering the core model architecture (refer to Figure 6).", + "bbox": [ + 501, + 534, + 923, + 898 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Parameter-Adjusting Unlearning. The first paradigm, which involves adjusting the model's parameters, is characterized by its direct intervention in the model's internal", + "bbox": [ + 503, + 898, + 921, + 941 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 32, + 421, + 44 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "structure. This approach typically requires retraining or fine-tuning the model on a curated dataset, designed to counteract the unsafe knowledge or behavior that needs to be unlearned. It also encompasses methods that follow a locate-then-edit pipeline, where specific parameters associated with the target knowledge are identified and directly modified to achieve unlearning [456]. Techniques such as Gradient Ascent [459] and its variations [460] are commonly employed. While traditional fine-tuning using cross-entropy loss is prevalent, more specialized loss functions have been proposed to enhance the control over the outputs of unlearned models, such as KL minimization [461, 462, 463] and the IDK loss function [464]. Additionally, recent work [465] has reframed LLM unlearning as a preference optimization problem [466], utilizing Negative Preference Optimization loss to improve the unlearning process. 
In contrast to these training-intensive approaches, LaW [456] draws inspiration from model editing by identifying and removing knowledge associations embedded in MLP weights, aiming to eliminate targeted information with minimal impact on the model's overall capabilities. Given the recent powerful multimodal perception and generation nature of LLMs, MMUnlearner [467] proposes to reformulate the setting of multimodal unlearning, which aims at erasing the unwanted visual concept but still preserving textual knowledge. Based on existing multimodal LLM-based unlearning benchmarks [468, 469, 470], SafeEraser [471] further incorporates unlearning mechanism and evaluation into multimodal LLM safety, via introducing Prompt Decouple Loss and a new metric called Safe Answer Refusal Rate.", + "bbox": [ + 71, + 53, + 491, + 489 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/7d17dc024ae8d367e87d8dd062de9a7bf8de5670470bc7baaeafe76bbb049324.jpg", + "image_caption": [ + "Fig. 6: The taxonomy illustration of LLM Unlearning for safety." + ], + "image_footnote": [], + "bbox": [ + 78, + 505, + 488, + 739 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Parameter-Preserving Unlearning. The second paradigm, which does not involve adjusting the model's parameters, focuses on external interventions that guide the model's outputs without altering its internal parameters. Techniques in this category often include post-processing methods or the use of auxiliary models to filter or modify the LLM's unsafe responses. Editing-based techniques [430, 472, 473, 474] modify specific components of the model architecture or introduce additional modules to counteract unwanted knowledge. Task vector approaches [475, 476]", + "bbox": [ + 71, + 796, + 491, + 944 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "leverage the geometric properties of the parameter space to identify and neutralize directions associated with targeted information. 
More recently, in-context learning strategies [477, 478] have emerged, which guide the LLM's behavior through carefully crafted prompts rather than weight modifications.", + "bbox": [ + 501, + 53, + 924, + 138 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Although heuristic methods are far more scalable, their guarantees are only empirical. Closing this gap between certified safety and practical feasibility remains a central research challenge for the field.", + "bbox": [ + 503, + 141, + 924, + 200 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "5.3 Roadmap & Perspective", + "text_level": 1, + "bbox": [ + 504, + 224, + 725, + 239 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "5.3.1 Model Editing", + "text_level": 1, + "bbox": [ + 504, + 246, + 656, + 261 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The evolution of model editing traces back to localized factual updates (e.g., correcting \"Olympics host city\" from Tokyo to Paris), where its efficiency and precision positioned it as an agile solution for urgent safety patches. Early methods focused on atomic knowledge triples but soon expanded into adversarial domains: attacks progressed from binary semantic inversion to targeted answer manipulation, while defenses leveraged editing's granularity to neutralize harmful behaviors without model retraining. Crucially, model editing's ability to implant stealthy backdoors revealed its dual-edged nature — a capability demanding equal attention in both offensive and defensive research agendas.", + "bbox": [ + 501, + 266, + 923, + 441 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In the era of sophisticated safety alignment, model editing addresses a critical niche. While safety fine-tuning establishes systematic safeguards through periodic retraining, it struggles with emergent, context-sensitive risks (e.g., geopolitical shifts or cultural updates) that evolve faster than retraining cycles. 
As LLMs scale, the intervals between alignment updates widen, creating safety gaps exacerbated by catastrophic forgetting risks. Model editing bridges these gaps through rapid surgical interventions — executing updates orders of magnitude faster than alignment procedures — by modifying specific unsafe knowledge or concepts, all while preserving general model stability. In summary, while safety fine-tuning remains essential for systematic alignment, model editing addresses four fundamental limitations in the current era:", + "bbox": [ + 501, + 441, + 923, + 660 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Temporal Agility: Mitigates emergent, unpredictable safety risks that cannot wait for full retraining cycles.", + "- Granular Control: Enables surgical modifications to specific reasoning pathways in large reasoning models (LRMs), correcting flawed chain-of-thought logic without disrupting valid inference patterns.", + "- Resource Decoupling: Reduces computational barriers for safety-critical updates, particularly in multimodal settings where traditional retraining costs scale prohibitively.", + "- Stable editing: Model editing is an ongoing and iterative process; however, excessive modifications can compromise the model's performance, likely due to the intricate interdependencies among neurons. Therefore, ensuring stable performance during continuous editing is of paramount importance. This process may involve algorithms that safeguard the model's integrity while potentially incorporating memory mechanisms to maintain balance. In summary, altering the original model parameters is a relatively \"risky\" endeavor, and plug-and-play externals" + ], + "bbox": [ + 504, + 664, + 921, + 943 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "nal modules may emerge as the predominant approach in the future.", + "bbox": [ + 86, + 53, + 488, + 80 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Future frontiers highlight model editing's unique value proposition. Specifically,", + "bbox": [ + 71, + 85, + 488, + 116 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- More Hidden Backdoor: By precisely modifying targeted parameters without perturbing unrelated knowledge, edited backdoors evade traditional detection methods that monitor broader model behavior.", + "- Multimodal Safety: In multimodal systems, editing reduces the computational burden of aligning heterogeneous data streams by selectively modifying cross-modal attention mechanisms.", + "- Concept-Level Safety: Directly edit abstract safety concepts (e.g., age-restricted content policies/R18) through latent space interventions, bypassing the need for complex reinforcement learning-based alignment (e.g., DPO).", + "- Interpretability-driven Safety: The model editing's interpretability dimension further provides causal insights into safety-critical model behaviors, informing robust verification frameworks." + ], + "bbox": [ + 73, + 117, + 488, + 349 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Critically, model editing complements - rather than replaces - systematic alignment, forming a hybrid governance paradigm: systematic alignment ensures broad ethical guardrails, while model editing enables surgical adaptations to emerging threats, i.e., establishing a closed-loop governance system for sustainable safe deployment. 
Together, they will form the twin pillars of LLM safety in the future.", + "bbox": [ + 71, + 353, + 490, + 470 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "5.3.2 Unlearning", + "text_level": 1, + "bbox": [ + 73, + 486, + 205, + 501 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The concept of machine unlearning has evolved from a specialized issue in traditional machine learning to a key aspect of responsible AI governance for LLMs. Early efforts in unlearning primarily focused on removing data from smaller, more specialized models, often in response to privacy regulations such as the GDPR's \"right to be forgotten\" [446]. However, with the advent of LLMs—trained on vast, diverse, and often uncontrolled datasets—the landscape of machine unlearning has undergone significant transformation. This shift has introduced new challenges and imperatives that were previously unforeseen.", + "bbox": [ + 71, + 503, + 490, + 664 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The initial phase of LLM unlearning focused on adapting existing techniques—primarily parameter-adjusting methods like gradient ascent [459] and fine-tuning variants [461, 462, 463, 464, 479]—to the scale and complexity of LLMs. While this phase demonstrated the feasibility of unlearning, it also highlighted several fundamental limitations, such as computational cost [445, 449], catastrophic forgetting [451], and lack of granularity [406]. These limitations have driven the development of more refined approaches, such as parameter-preserving methods [472, 475, 476, 477, 478]. These methods, which utilize techniques like task arithmetic and in-context learning, provide a glimpse of a future where unlearning can be achieved with greater efficiency and precision. 
The shift to multimodal LLMs has further expanded the scope, necessitating unlearning methods that can address the safety concerns arising from the interaction between different modalities [467, 468, 469, 470, 471]. The current landscape of LLM unlearning can be described as a shift from reactive “data deletion” to proactive “knowledge", + "bbox": [ + 71, + 665, + 491, + 944 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "sculpting.\" We are moving beyond merely removing information to precisely shaping the model's understanding and behavior. This shift is driven by several key insights:", + "bbox": [ + 503, + 53, + 919, + 98 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Unlearning as Preference Optimization: By framing unlearning as preference learning, we can align the model's output with desired safety and ethical guidelines, utilizing techniques like Negative Preference Optimization [465, 466] or safety-oriented preference optimization [480].", + "- The Importance of Context: Since the \"unsafety\" of information is often context-dependent, researchers are developing methods to selectively unlearn behaviors in specific situations while maintaining the model's general capabilities [477, 481, 482, 483].", + "- Multimodal Unlearning: Addressing the fusion of modalities (text, images, audio) presents new challenges in removing unwanted concepts and behaviors both within and across modalities [467, 471, 484]." + ], + "bbox": [ + 504, + 104, + 921, + 308 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Looking ahead, several critical areas are essential for further advancement in the field:", + "bbox": [ + 504, + 315, + 921, + 344 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Principled Evaluation Metrics: Robust, standardized benchmarks are necessary to accurately assess unlearning effectiveness and potential side effects. 
These metrics should move beyond simplistic, easily manipulated measures [450, 476, 485, 486, 487].", + "- Theoretical Foundations: A deeper understanding of the mechanisms behind unlearning in LLMs is needed to develop truly reliable techniques [451, 488]. This includes exploring why unlearning is challenging and how different methods affect internal representations.", + "- Hybrid Approaches: Combining parameter-adjusting methods (for coarse-grained removal) with parameter-preserving techniques (for fine-grained refinement) presents a promising path forward. This aligns with the \"hybrid governance paradigm\" from Model Editing, allowing for both broad and precise interventions.", + "- Unlearning for Interpretability: Instead of using interpretability solely to guide unlearning, the unlearning process itself can be used to enhance our understanding of model behavior [489]. By selectively removing knowledge and observing the consequences, we gain causal insights into the model's reasoning. This represents a fundamentally different and more powerful use of unlearning—this is the key \"dry goods\" insight.", + "- Unlearning Benchmark: Building upon the aforementioned insight, it is evident that unlearning currently lacks a standardized benchmark. Establishing a method to effectively balance a model's ability to forget while systematically ensuring its performance remains reliable is crucial (Figure 7). In the realm of multimodal learning, creating such a benchmark could be even more complex, potentially representing a pivotal step in advancing this field [471, 490, 491, 492, 493]." + ], + "bbox": [ + 504, + 351, + 921, + 833 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In conclusion, LLM unlearning is not merely a technical challenge; it is a fundamental requirement for building trustworthy and beneficial AI systems or even agent ecosystems [494, 495]. 
It is evolving from a reactive measure to a proactive design principle, shaping the very foundations of how LLMs learn, adapt, and interact with the world. The journey from \"forgetting\" to \"knowledge sculpting\"", + "bbox": [ + 503, + 839, + 921, + 944 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 32, + 421, + 44 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/3a418fa605c423149f828c2f001b5edc46cfe6b96a344e91855296aab87fa433.jpg", + "image_caption": [ + "Fig. 7: We define the goal of unlearning as maximizing both model utility and forget quality, meaning that algorithms positioned closer to the top-right corner are considered more reliable." + ], + "image_footnote": [], + "bbox": [ + 84, + 56, + 488, + 287 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "is underway, promising a future where LLMs can be both powerful and aligned with human values [496, 497, 498].", + "bbox": [ + 71, + 382, + 491, + 412 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "6 LLM(-AGENT) DEPLOYMENT SAFETY", + "text_level": 1, + "bbox": [ + 73, + 431, + 410, + 448 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In this section, we focus on the safety of LLM and LLM-agent during the deployment phase, addressing three progressively broader dimensions: LLM Safety (Section 6.1), Single-agent Safety (Section 6.2), and Multi-agent Safety (Section 6.3). We begin by discussing the potential threats and defense mechanisms associated with the foundational LLM during inference. Subsequently, we explore the additional security risks introduced by supplementary modules, which impact both individual agents and multi-agent systems. 
This structured approach ensures a comprehensive understanding of safety challenges at varying scales of LLM(-agent) deployment.", + "bbox": [ + 71, + 450, + 491, + 628 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "6.1 Deployment Safety", + "text_level": 1, + "bbox": [ + 73, + 646, + 256, + 662 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The deployment of a single LLM introduces significant security challenges, including adversarial attacks, data privacy risks, and content integrity concerns. This subsection systematically examines these issues by first analyzing key attack vectors (Subsection 6.1.1), such as model extraction, membership inference, jailbreak attacks, prompt injection, data extraction, and prompt stealing, which threaten model confidentiality, robustness, and ethical compliance. Next, we explore defensive mechanisms (Subsection 6.1.2), including input preprocessing, output filtering, robust prompt engineering, and system-level security controls aimed at mitigating these threats. Finally, we discuss evaluation and benchmarking (Subsection 6.1.3), covering robustness, content safety, privacy leakage, multi-modal safety, and standardized security benchmarks, ensuring a comprehensive assessment of LLM deployment safety. This structure follows a logical progression from identifying threats to implementing defenses and establishing reliable evaluation methodologies.", + "bbox": [ + 71, + 664, + 491, + 941 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "6.1.1 Attack in Deployment", + "text_level": 1, + "bbox": [ + 504, + 53, + 709, + 68 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We first give an overview of the attacks in Figure 8.", + "bbox": [ + 504, + 74, + 864, + 89 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/3c01c7964a0ec82961f085a41736bb2f4f02d485345f84dac051957673a31670.jpg", + "image_caption": [ + "Fig. 
8: The overview of attacks in single LLM's deployment phase." + ], + "image_footnote": [], + "bbox": [ + 509, + 107, + 919, + 268 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Model Extraction Attacks. Model extraction attacks aim to steal a deployed language model, which only provides an Application Programming Interface (API) that processes text input (i.e., a prompt) and returns generated outputs. He et al. and Peng et al. [499, 500, 501, 502] made a series of early efforts in launching model extraction or stealing attacks against LLMs (even deployed as a service) and proposed various defense mechanisms to mitigate such risks. Carlini et al. [503] conducted the model-stealing attack against a black-box large language model by targeting its embedding projection layer. Building on this, Finlayson et al. [504] further investigated the risk of stealing embedding dimensions by exploiting the softmax bottleneck. Another line of research explores model extraction in a gray-box setting. For instance, Zanella et al. [505] demonstrated the feasibility of stealing high-fidelity language models when given access to a frozen or fine-tuned encoder.", + "bbox": [ + 501, + 323, + 923, + 571 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Another category of model extraction attacks focuses on recovering the full weight of an LLM. For instance, Horwitz et al. [506] successfully reconstruct a pre-fine-tuned LLM (i.e., the pre-trained model before fine-tuning) using its fine-tuned variants, such as low-rank adaptation (LoRA) models. Beyond general model-stealing attacks, some research explores threats to specialized capabilities. Li et al. [507] extract the coding abilities of an LLM, including code synthesis and translation. Additionally, Liu et al. 
[508] propose a theoretically grounded method for stealing any low-rank language model.", + "bbox": [ + 501, + 571, + 921, + 733 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Membership Inference Attacks. Membership Inference Attack (MIA) tries to figure out whether a given candidate is included in the training dataset of an LLM [117, 509].", + "bbox": [ + 503, + 733, + 921, + 777 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Methods. [509] propose the first MIA with MIN-K% PROB, which identifies examples that contain few outlier words with low probabilities as non-members. Afterward, [510] propose MIN-K%++, which simulates the membership inference into identifying local maxima. Some works reveal that the success of MIAs against LLMs may be due to sampling non-members from different distributions. Thus, [511] propose Blind attack, which conducts MIA by applying a threshold and completely ignores the target model. [512] selectively combine the existing MIAs and aggregate their scores to perform a statistical test. [513]", + "bbox": [ + 504, + 781, + 923, + 944 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 906, + 32, + 923, + 42 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "identify the membership of a verbatim text by constructing paraphrased options (with another proxy model) and asking the target LLM for true verbatim. [514] examine the relative change in conditional log-likelihoods when prefixing target data points with non-member context. [515] propose to generate noisy neighbors for a target sample by adding stochastic noise in the embedding space. 
[516] train a neural network to capture variations in output probability distributions between members and non-members.", + "bbox": [ + 84, + 53, + 491, + 198 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$\\nRightarrow$ Document-level MIAs. Some works focus on document-level MIAs. Meeus et al. [517] propose the first MIA for document-level leakage, which contains four steps: retrieving, normalizing, aggregating, and predicting. After that, Meeus et al. [518] validate that it doesn't work against models that do not naturally memorize and propose to utilize copyright traps to detect the use of copyrighted materials. Puerto et al. [519] make exploration toward collection-level MIA against LLMs by computing features and two-stage aggregation.", + "Different Settings. Some works also explore the MIA risk in novel settings. Anderson et al. [520] propose the first MIA against Retrieval Augmented Generation (RAG) systems by directly asking whether one candidate is its member or not. Li et al. [521] compare the output semantic similarity of the sample for the RAG system and the remaining to determine the membership of RAG's database. Zhang et al. [522] propose the first MIA against in-context learning and four attack methods, including GAP, Inquiry, Repeat, and Brainwash. Meanwhile, Duan et al. [523] reveal that MIA risk in in-context learning is more severe than in the fine-tuning setting. Wen et al. [524] conduct membership inference of fine-tuning data by poisoning pretraining data and backdoorsing the pre-trained model. Then Wen et al. [525] comprehensively assess the MIA risk against adaptation methods, including LowRank Adaptation (LoRA), Soft Prompt Tuning (SPT), and In-Context Learning (ICL). Balloccu et al. [526] study the indirect data contamination for closed-source LLMs, which can also be regarded as MIA. Fu et al. 
[527] propose Self-calibrated Probabilistic Variation, which fine-tunes the reference model by prompting the target LLM.", + "$\\nRightarrow$ Factor Impact. Duan et al. [117] find that the existing MIAs work poorly on LLM due to massive training data and near-one epoch training. Li et al. [528] clarify the impact of fine-tuning and evaluation metrics and propose a three-phase framework (i.e. training, simulation, and confidence calculation) to assess membership leakage. Kandpal et al. [87] find that duplication of training data highly extends the risk of MIA. Naseh et al. [529] validate that using synthetic data in membership evaluations may lead to false classification results." + ], + "bbox": [ + 66, + 199, + 493, + 811 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Jailbreak Attacks. Jailbreak attacks aim to induce the large language model to generate unsafe content like violence [260]. Jailbreak attacks focus on bypassing the safety rules, including system safety prompts and safety filters, while prompt injection attacks target all system prompts. Lots of literature have studied the vulnerability of LLM, where different terms, including \"jailbreak attack\" and \"redteaming\", all point to the same safety vulnerability that", + "bbox": [ + 71, + 825, + 491, + 944 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "generates unsafe content. We classify them into two main categories, i.e. optimization-based and strategy-based.", + "bbox": [ + 503, + 53, + 921, + 82 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Strategy-based jailbreaks figure out novel strategies or templates to generate one adversarial prompt at a heat to test LLMs' vulnerabilities, which are pre-defined. Thus, the generated prompt is non-evolvable. 
Specifically, useful strategies include persuasion [559], role-playing [560, 561, 562, 563], cipher [564, 565], ASCII [566], long-context [567], low-resource language [568, 569], in-context malicious demonstration [570], overloaded logical thinking [571], misspelling [572], multi-language mixture [573], rephrasing [538, 574, 575, 576], competing objectives and generalization mismatch [577], [wenjie: splitting sub-queries [578]], zero-shot generation [579], personal modulation [580].", + "bbox": [ + 501, + 82, + 921, + 256 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Optimization-based jailbreaks contain a multi-step optimization process to revise one unsafe prompt. Here, we further divide the optimization-based jailbreaks into gradient-based and LLM-based ones:", + "bbox": [ + 501, + 256, + 921, + 314 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Gradient-based Optimization. GCG [260] appends one suffix to the target prompt, then utilizes the gradient of loss, which is calculated with the target (e.g., \"Sure\" or \"Yes\") and output, to optimize the soft prompt. Then, it greedily searches the best-matched tokens in the dictionary for soft prompt replacement. AutoDAN-B [535] solves the limited readability of GCG by constructing a proxy score where the perplexity is considered, which is utilized for greedy sampling. I-GCG [531] improves GCG by appending a template before the suffix and uses a multi-coordinate updating strategy and easy-to-hard initialization to optimize the suffix. COLD-Attack [581] adapts Energy-based Constrained Decoding with Langevin Dynamics for controllable adversarial prompt generation. MA-GCG [532] proposes momentum gradient to boost and stabilize the greedy search for tokens in adversarial prompts. A-GCG [533] introduces a smaller draft model than the target model to sample the promising suffix candidates for faster optimization. 
BOOST [582] enhances the existing jailbreak attacks by adding eos tokens to the end of the unsafe prompt. CRT [583] proposes an enhanced reinforcement learning-based jailbreak with consideration of prompt diversity. I-FSJ [584] deploys few-shot learning and demo-level random search.", + "$\\Rightarrow$ LLM-based Optimization. PAIR [261] constructs a system prompt and uses an attacker LLM to generate and revise adversarial prompts. It also uses a Judge model to assess the feedback from the victim, which is further utilized for revising the adversarial prompt. AutoDAN-A [534] utilizes crossover strategies and LLM-based mutation to revise adversarial prompts into stealthy sentences. AntoDAN-Trubo [539] AutoDAN-Turbo proposes to find useful strategies by prompting an LLM automatically. ToA (Tree of Attack) [536] iteratively uses an LLM to transform the unsafe prompt into two variations and keeps the prompt variation that achieves a higher score. Xiao et al. [585] adopt a similar pipeline with PAIR [261] and introduce malicious content concealing and memory reframing. Puzzler [586] proposes defensive and offensive measures to conduct an indirect jailbreak. GPT-FUZZER [587] starts from human-written prompts, and uses templates and mutation to rewrite unsafe prompts." + ], + "bbox": [ + 501, + 315, + 923, + 944 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 906, + 32, + 923, + 42 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/1b920951359c81b1de588e6f741d867a8d8080726518d9c47818f949e3c7423e.jpg", + "table_caption": [ + "TABLE 6: A summary of attacks for LLM after deployment. Our evaluation includes representative studies that exemplify these security aspects. More details can be found in the main text. OS indicates whether the code is open-sourced." 
+ ], + "table_footnote": [], + "table_body": "
AttacksMethodOSYearStrategySettingDatasetsTarget ModelsMetrics
Model ExtractionCarlini et al. [503]Yes2024Binary SearchBlack-boxNoneGPTs, LLaMA, Pythia,ada, cabbageQuery&TokenCost, MSE, RMS
Finlayson et al. [504]No2024Softmax BottleneckBlack-boxNonePythia, GPT-3.5Query Cost
Zanella et al. [505]No2024Matrix OperationsGrey-boxSST-2, MNLI, AGNewsBERTs, XLNetQuery Cost, Acc,Agreement
Horwitz et al. [506]Yes2024Spectral DeTuningWhite-boxLoWRAViT, SD, MistralMSWE, SEM
Membership InferenceMIN-K% PROB [509]Yes2023ProbabilitiesBlack-boxWikipediaLLaMAs, Pythia, NeoX,OPTTPR, FPR, ROC,AUC
MIN-K%++ [510]Yes2022Local MaximaBlack-boxWikiMIA, MIMIRPythia, GPT-NeoXLLaMA, OPT, MambaAUROC, TPR, FPR
Blind [511]Yes2024ThresholdBlack-box8 setsGPT-3, OpenLLaMAAUC ROC
LLM-DI [512]Yes2024AggregationBlack-boxPILEPythiasAUC, p-values
DE-COP [513]Yes2024ParaphrasesBlack-boxarXiv:Tection, BookTectionMistral, Mixtral, LLaMA, GPTs, ClaudeAUC
Recall [514]Yes2024Log-LikelihoodsBlack-boxWikiMIA, MIMIRPythia, GPT-NeoXLLaMA, OPT, MambaAUC, TPR@FPR
Noisy [515]No2024Embedding NGBRsGray-boxOpenWebText,WikipediaGPT-2TPR, FPR, AUC
SMIA [516]No2024PerturbationGray-boxWikipedia, FANPythia, Pythia-Deduped, GPT-NeosAUC-ROC, TPR, FPR
FEATAGG [517]No2024Feature AggregationBlack-boxProjectGutenberg,ArXivOpenLLaMATPR@FPR, AUC
RAG-MIA [520]No2024Direct AskingBlack-boxHealthCareMagic,Enronflan, llama, mistralTPR@FPR, AUC-ROC
JailbreakGCG [260]Yes2023Gradient-basedWhite-boxVicuna, LLaMA-2AdvBenchASR, Loss
AmpleGCG [530]Yes2024Hybrid-basedWhite-boxVicuna, Llama-2,Mis-tral,GPTsAdvBenchASR, US, Diver-sity, Time
I-GCG [531]Yes2024Gradient-basedWhite-boxAdvBench,HarmBenchVICUNA, GUANACOLLAMA, MISTRALASR
MA-GCG [532]Yes2024Gradient-basedWhite-boxAdvBenchVicuna, MistralASR, Time
A-GCG [533]Yes2024Gradient-basedWhite-boxAdvBenchLlama2, VicunaASR, Acc
AutoDAN-A [534]Yes2023LLM-basedBlack-boxAdvBenchVicuna, MistralASR, Recheck,PPL
AutoDAN-B [535]Yes2023Gradient-basedWhite-boxAdvBenchVicuna, Guanaco, PythiaASR, Recheck
PAIR [261]Yes2023LLM-basedBlack-boxJailbreakBenchVicuna, Llama-2, GPTs,Claudes,GeminiASR, QPS
ToA [536]Yes2023LLM-basedBlack-boxAdvBench, Harm123Vicuna, Llama-2, PaLM-2,GPTs, Claude3, GeminiGPT4-MetricHuman-Judge
PAL [537]Yes2024LLM-basedBlack-boxAdvBenchLlama-2, GPT-3.5ASR, Manual Labeling
Masterkey [538]No2023RephrasingBlack-boxAdvBench, Harm123GPTs, Bing, BardASR, QSR
AutoDAN-Turbo [539]Yes2024LLM-basedBlack-boxHarmbenchLlama-2, Gemma, GPT-4,GeminiASR, StrongRE-JECT
FlipAttack [540]Yes2025RephrasingBlack-boxAdvBench, StrongRE-JECTGPTs, Claude 3.5 Sonnet, Llama 3.1 405B, Mixtral 8x22BASR
Geneshift [541]Yes2025LLM-basedBlack-boxAdvBenchGPTsASR
Prompt InjectionIPP [542]Yes2022HandcraftBlack-boxOpenAI Examplestext-davinciASR
Greshake et al. [543]Yes2023Data PoisoningBlack-boxNonetext-davinci, GPT-4None
HOUYI [544]Yes2023Components AsmblBlack-boxFive QueriesSUPERTOOLSManual
Yan et al. [130]Yes2023PoisoningBlack-boxSeveral CasesAlpacaNgt, Pst, Ocrc
TT [545]No2023GameBlack-boxTensor TrustGPTs, Claudes, PaLM, LLaMAsRobustness Rate
JudgeDeceiver [546]Yes2024Gradient-basedWhite-boxMT-Bench, LLMBarMRPC, Jfleg, HSOL,RTE, SST2, SMSMistral, Openchat, LlamasACC, ASR, PACKEY-E, LM-E
AUPI [547]Yes2024Gradient-basedWhite-boxMRPC, Jfleg, HSOL,RTE, SST2, SMSLlama2ASR
AUTOHIJACKER [548]No2024LLM-basedBlack-boxAgentDojo, OPILlama, Command-R,GPTsASR
Data Extractionzlib [108]Yes2020Generate & InferenceBlack-boxTop-n, Temperature, InternetGPT-26 metrics
AutoSklearn [549]No2023Greedy, Contrastive, Beam decodingBlack-boxPileGPT-NeoPrecision, Recall,R@FPR
DECOM [550]No2024DecompositionBlack-boxNYT, WSJFrontiersTRM, EMP,BITAP
Context [551]No2022Context, Zero-shot,Few-shotBlack-boxEnron CorpusGPT-NeoAcc
ETHICIST [552]Yes2023Prompt TuningGray-boxLM-ExtractionGPT-NeoRecall
Pli-compass [553]No2024GroundingBlack-boxEnron emailGPT-JExtraction Rate
DSP [554]No2024Dynamic Soft PromptingBlack-boxLMEB, The StackGPT-Neo, Pythia, Star-CoderBaseEER, FER, PPL
PWB [555]Yes2024Gradient-basedWhite-boxPilePythia, LlamaPrecision, AUC,TPR
Prompt StealingSha et al. [556]No2024LLM-basedBlack-boxRetrievalQA,AlpacaGPT4ChatGPT, LLaMAAcc, Precision, Recall, AUC
output2prompt [557]Yes2024LLM-basedBlack-box3 User & 3 SystemPromptsLlamas, GPTsBLEU, CS, Preci-sion, Recall
PRSA [558]No2024Output DifferenceBlack-boxCategory18GPTsBLEU, FastKAS-SIM, JS
", + "bbox": [ + 76, + 126, + 921, + 902 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "ECLIPSE [588] uses an LLM as a suffix generator and optimizer. PAL [537] proposes an online proxy model (which is used for adversarial prompt generation) training pipeline.", + "bbox": [ + 84, + 51, + 490, + 111 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "$\\Rightarrow$ Others. EnJa [589] proposes to ensemble prompt and token-level attack methods via a template-based connector. AmpleGCG [530] first collects lots of successful suffixes and then trains the generative model to generate a specific suffix for a given unsafe prompt. Zhao et al. [590] targets the scenario where the decoding process of target LLM is assisted with smaller models' guidance.", + "bbox": [ + 66, + 111, + 491, + 214 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Prompt Injection Attacks. Prompt injection is a vulnerability where an attacker manipulates the input prompts of LLMs to force them to generate a specific output, which is usually out of the range for normal use (e.g., goal hijacking and prompt leaking [542]), often by injecting malicious text or commands into the input field. Attackers can employ a variety of techniques to carry out such attacks.", + "bbox": [ + 66, + 220, + 491, + 323 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$\\Rightarrow$ Direct Prompt Injection. Perez et al. [542] directly inject handcrafted adversarial prompts into inputs to misalign the language model. HOUYI [544] proposes an injection generation framework which includes three components. Yan et al. 
[130] utilize LLMs to generate diverse trigger instructions that implicitly capture the characteristics of trigger scenarios. TENSOR TRUST leverages the TENSOR TRUST web game to generate a large-scale dataset and benchmark [545]. AUPI [547] adopts a gradient-based optimization method, specifically, a momentum-enhanced optimization algorithm, to generate universal prompt injection data. Upadhayay et al. [591] argue that LLMs suffer from cognitive overload and propose to use in-context learning to jailbreak LLMs through deliberately designed prompts that induce cognitive overload. Kwon et al. [592] circumvent security policies by substituting sensitive words—likely to be rejected by the language model—with mathematical functions.", + "$\\nLeftrightarrow$ Indirect Prompt Injection. Greshake et al. [543] propose to indirectly inject prompts into the data that are likely to be retrieved. Bagdasaryan et al. [593] design a prompt injection attack against multi-modal LLMs, by generating an adversarial perturbation corresponding to the prompt and blending it into an image or audio recording. Neural Exec [594] designs a multi-stage preprocessing pipeline for cases like Retrieval-Augmented Generation (RAG)-based applications. PoisonedAlign [595] boosts the success of prompt injection attacks by strategically creating poisoned alignment samples in the LLM's alignment process. TPIA [596] crafts non-functional perturbations that contain malicious information and inserts them into the victim's code context by spreading them into potentially used dependencies like packages or RAG's knowledge base. F2A [597] proposes to use feign security detection agents to bypass the defense mechanism of LLMs. AUTOHIJACKER [548] uses a batch-based optimization framework to handle sparse feedback and leverages a trainable memory to enable effective generation.", + "Different Settings. JudgeDeceiver uses gradient-based optimization to inject LLM-as-a-Judge scenarios [546]. Pedro et al. 
[598] study the risk of injections targeting web applications based on the Langchain framework. Lee et" + ], + "bbox": [ + 66, + 329, + 491, + 943 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "al. [599] propose a human-AI collaborative framework to explore the potential of prompt injection against federated military LLMs. PROMPT INFECTION [600] proposes to make malicious prompts self-replicate across interconnected agents in multi-agent systems. Zhang et al. [601] explore the risk of prompt injection in LLM-integrated systems like LLM-integrated mobile robotic systems.", + "bbox": [ + 514, + 51, + 921, + 156 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Data Extraction Attacks. Data extraction attacks try to figure out the personally identifiable information (PII) that is used to train the LLMs [108]. It starts from sufficient-length prefixes to perform extraction and additional measures to determine if extracted texts are valid.", + "bbox": [ + 503, + 162, + 923, + 234 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$\\nLeftrightarrow$ Methods. In the beginning work [108], the proposed extraction process contains two stages \"generate-then-rank\": sampling potentially memorized examples and membership inference. It proposes a temperature-decaying method to sample more diverse examples and use surrogate models to infer the membership. After that, Al-Kaswan et al. [549] propose using greedy, contrastive, and beam decoding strategies to generate examples and use a classifier to infer the membership. Su et al. [550] propose an instruction decomposition technique to extract fragments of training data gradually. Huang et al. [551] extensively explore the effect of context, zero-shot, and few-shot methods in extracting the personal email address. ETHICIST proposes a smoothing loss and a calibrated confidence estimation method to extract the suffix and measure the confidence [552]. Nakka et al. 
[553] improves the extraction performance by grounding the prefix of the manually constructed extraction prompt with in-domain data. Wang et al. [554] propose to train a transformer-based generator to produce dynamic, prefix-dependent soft prompts. Ozdayi et al. [105] introduce an approach that uses prompt tuning to control the extraction rates of memorized content. Meng et al. [602] propose a two-stage method, i.e., collection and ranking, to recover PPI when PII entities have been masked.", + "Different Settings. Some works also explore the risk of data leakage in novel settings. Wang et al. [555] study the probability of data extraction in fine-tuning settings and Bargav et al. [603, 604] extract the training data by comparing the output difference before and after the fine-tuning. Jiang et al. [605, 606, 607] propose to extract the private Retrieval-Augmented Generation (RAG) documents. Peng et al. [608] extract the private RAG documents by poisoning in the fine-tuning process. Nasr et al. [107] explore the potential risk of data extraction for the aligned production language models. Panda et al. [609] extract the fine-tuning secret data by poisoning the pertaining dataset. Lu et al. [610] propose to extract PII from an aligned model with model merging. Chen et al. [611] find that fine-tuning can recover the forgotten PIIs in pretraining data. Panchendrarajan et al. [612] propose to extract the whole private training data in the fine-tuning process. Rashid et al. [613] propose selective weight tampering to explore PPI leakage in Federated Language Models. Dentan et al. [614] extract data from layout-aware document understanding models like unimodal or bimodal models.", + "Different Applications. Leveraging the abnormally high" + ], + "bbox": [ + 500, + 242, + 923, + 943 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 906, + 32, + 919, + 42 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "token probabilities, some works utilize the memorization of LLMs to extract the fingerprint or steganography [615]. Al-Kaswan et al. [616] explore memorization in large language models for code and find that code models memorize training data at a lower rate than natural language models. Nie et al. [617] utilize the token-level features derived from the identified characteristics to decode the PII. Lehman et al. [618] reveal the risk of Electronic Health Records leakage of LLMs. Diera et al. [619] conduct experiments to assess the PII leakage of fine-tuned BERT models and found that Differential Privacy (DP) has a negative effect when deployed in fine-tuning. Zhang et al. [620] propose data extraction attacks against text classification with transformers. Huang et al. [621] propose an evaluation tool, i.e. HCR, to assess the PPI leakage in Neural Code Completion Tools.", + "bbox": [ + 84, + 53, + 491, + 286 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "$\\nrightarrow$ Factor Assessment. Some work studies the factors of data extraction including decoding schemes, model sizes, prefix lengths, partial sequence leakages, and token positions [622, 623]. Yash et al. [624] explore the effects of prompt sensitivity and access to multiple checkpoints to extraction attacks. Staab et al. [625] construct a dataset consisting of real Reddit profiles to extract personal attributes. Xu et al. [626] conduct experiments to evaluate the factors of different suffix generation methods and different membership inference attacks in extraction performance. Karamolegkou et al. 
[627] evaluate the effect of model structure, data type, probing strategies, and metrics.", + "bbox": [ + 68, + 287, + 491, + 474 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Prompt Stealing Attacks. Given that crafting effective prompts requires significant engineering effort and can be considered valuable intellectual property (IP), promptstealing attacks aim to compromise this IP by reconstructing prompts from generated responses [556, 557, 558]. These generation effects are often used to attract prospective prospective buyers. Sha et al. [556] pioneer this approach by collecting a dataset and training classifiers to predict prompt parameters—such as whether the prompt is direct, role-based, or in-context. They then used a large language model (LLM) to reconstruct the prompt. Similarly, Zhang et al. [557] trained an LLM on output-prompt pairs to directly infer the original prompt, while Yang et al. [558] leveraged generation differences to refine surrogate prompts. However, recovering the original prompt solely from the output is challenging. Out of this, Zheng et al. [628] propose a timing-based side-channel method to infer the prompt during inference.", + "bbox": [ + 71, + 477, + 491, + 739 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "6.1.2 Defensive Mechanisms in Deployment", + "text_level": 1, + "bbox": [ + 73, + 750, + 393, + 765 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In Subsubsection 6.1.1, we analyzed various attack scenarios targeting individual LLM deployments. However, in real-world applications, defense mechanisms are not designed as isolated, one-to-one countermeasures against specific attacks. Instead, they follow fundamental security principles to establish a systematic defense framework, as illustrated in Figure 9. 
This framework integrates multiple layers of protection, ensuring resilience against a wide range of adversarial threats while maintaining model usability and efficiency.", + "bbox": [ + 71, + 767, + 490, + 912 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Input Preprocessing Defenses Input preprocessing serves as the first line of defense in LLM deployment, aiming to", + "bbox": [ + 71, + 912, + 491, + 944 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/f4ce229cb80a8c96656ded2aa655c39cbe884dd49ac1e9e0e1650264348f5451.jpg", + "image_caption": [ + "Defensive Mechanisms in Deployment", + "Fig. 9: The overview of attacks in single LLM's deployment phase." + ], + "image_footnote": [], + "bbox": [ + 506, + 71, + 915, + 159 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "detect and neutralize adversarial inputs before they reach the model.", + "bbox": [ + 503, + 224, + 921, + 253 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Attack Detection & Identification: Effective input filtering [629, 630] begins with attack detection [631], which identifies adversarial prompts through statistical [632], structural [633], or behavioral inconsistencies [634]. Gradient-based detection methods [635] leverage safety-critical gradient analysis and loss landscape exploration to uncover jailbreak prompts that manipulate LLM behavior. These approaches identify adversarial inputs [636, 637] by analyzing how small perturbations [638] affect model outputs, detecting highly sensitive or misaligned gradients that indicate targeted attacks. Perplexity-based methods [632, 632] measure the probability distribution of input sequences, flagging atypical or low-likelihood prompts as potential adversarial inputs. 
These techniques are particularly effective in detecting prompt injection and adversarial perturbations, where crafted prompts deviate significantly from natural language distributions.", + "bbox": [ + 501, + 255, + 921, + 503 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Beyond individual heuristics, universal detection frameworks [639] integrate multiple detection strategies to counter diverse attack vectors, including prompt injection [640], backdoor manipulations [641], and adversarial attacks [637]. These frameworks employ ensemble-based filtering mechanisms, combining gradient analysis [642], perplexity estimation [643], and syntactic evaluation for generalized attack resilience.", + "bbox": [ + 501, + 503, + 921, + 619 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Semantic & Behavioral Analysis: Attack detection alone is insufficient, as certain adversarial inputs may bypass traditional filtering mechanisms. Semantic [644] and behavioral analysis enhance input preprocessing by evaluating linguistic intent and model alignment. Self-examination techniques allow LLMs [645, 646] to assess whether they are being manipulated, leveraging auxiliary reasoning steps to detect deceptive prompts. Alignment-based verification [647] ensures that the model's responses remain consistent with its safety objectives [330], identifying inputs that subtly nudge the model toward policy violations or ethical misalignment. Intention analysis [648, 649] further refines input filtering by discerning subtle manipulations designed to bypass explicit security checks. 
Unlike token-level detection, which flags overtly adversarial inputs, intention-aware defenses analyze the semantic structure and purpose of the input to preemptively reject jailbreak attempts.", + "bbox": [ + 501, + 621, + 921, + 869 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Adversarial Defense & Mitigation: When detection and behavioral analysis fail to fully neutralize adversarial inputs, robustness-enhancing techniques [647] mitigate their effects by reducing model susceptibility to manipulation [334, 650]. Semantic smoothing [651, 652] techniques", + "bbox": [ + 503, + 869, + 921, + 944 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 32, + 421, + 44 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "introduce controlled randomness into LLM responses, reducing the model's sensitivity to adversarial perturbations and preventing reliable jailbreak execution. By stabilizing decision boundaries [653], these methods enhance resistance against prompt manipulation strategies that exploit response predictability.", + "bbox": [ + 71, + 53, + 491, + 141 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Preemptive input transformations [654], such as back-translation [655] or paraphrasing, modify incoming queries [651] while preserving semantic intent, disrupting adversarial structures embedded within malicious prompts. Data augmentation [656] and adversarial training further strengthen model robustness by exposing LLMs to adversarial prompts during training, forcing them to learn invariances that reduce their vulnerability to real-world attacks.", + "bbox": [ + 71, + 141, + 491, + 257 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Output Filtering Mechanisms. 
Output filtering mechanisms [212, 657] serve as a critical safeguard in LLM deployment, ensuring that generated responses comply with safety constraints while preserving informativeness. Unlike input preprocessing, which aims to prevent adversarial prompts from reaching the model, output filtering mitigates harmful content post-generation. Existing approaches primarily follow three paradigms: rule-based constraints, generative adversarial filtering, and toxicity detection.", + "bbox": [ + 71, + 257, + 491, + 388 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Rule-based mechanisms [658] impose predefined constraints on model outputs, preventing the generation of harmful, unethical, or undesired content. Programmable guardrails [659] offer a structured framework where developers can enforce response filtering, topic restriction, and ethical alignment. These methods often integrate reinforcement learning from human feedback [155] or rule-based reward [660] modeling to refine output safety. While effective at handling explicit violations, static rule-based methods struggle with nuanced adversarial prompts and subtle misalignments.", + "bbox": [ + 76, + 388, + 491, + 547 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "To address these limitations, generative adversarial filtering [661] leverages self-critique [662, 663], ensemble detection, and dynamic response evaluation [664]. Self-rectification mechanisms [663, 665] enable LLMs to critique their own outputs and refine responses through iterative refinement. Additionally, ensemble-based [666] moderation models aggregate predictions from multiple LLMs, improving robustness against circumvention techniques. 
Adaptive filtering frameworks [667] employ perplexity-based assessments and adversarial perturbation detection to flag responses deviating from expected linguistic patterns, enhancing their resilience against jailbreak attempts [668, 669] and toxic content injection.", + "bbox": [ + 71, + 549, + 491, + 737 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Toxicity detection [670, 671, 672] and content moderation [673, 674, 675, 676] further reinforce output safety by identifying and mitigating hate speech [677], misinformation, and other harmful content. Supervised finetuning adapts LLMs to recognize undesirable patterns, while classifier-based detection models [678] filter responses in real-time. Some approaches introduce debiasing strategies, such as controlled decoding [679, 680] and anti-expert guidance [681], to suppress toxic outputs without sacrificing response diversity. However, these methods face challenges in balancing false positives and false negatives, particularly in ambiguous or context-dependent cases.", + "bbox": [ + 71, + 738, + 491, + 912 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "The effectiveness of output filtering hinges on its ability to balance strict control with linguistic flexibility, ensur", + "bbox": [ + 73, + 912, + 491, + 941 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "ing that models remain both safe and practically useful. A hybrid approach combining rule-based safeguards, self-correcting mechanisms, and adaptive toxicity moderation is essential to achieving robust and scalable LLM deployment. Robust Prompt Engineering. Robust prompt engineering aims to enhance LLM safety by designing input prompts that resist adversarial manipulation [682], protect sensitive data, and mitigate harmful outputs—all [683] without modifying model parameters. 
These strategies act at the interaction level, offering lightweight and model-agnostic protection.", + "bbox": [ + 501, + 53, + 921, + 212 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Recent efforts have introduced prompt optimization techniques grounded in adversarial robustness, including embedding-space manipulation and defensive objective alignment. Methods such as Robust Prompt Optimization [684] and Prompt Adversarial Tuning generate transferable suffixes [668] or prefix [685] embeddings to guide model behavior [686] under attack [687], effectively lowering jailbreak success rates while preserving task performance. Similarly, goal prioritization frameworks [688] enforce inference-time objective consistency, dynamically resolving conflict between user instructions and safety constraints without requiring access to malicious samples. Complementary to these strategies, patch-based methods integrate interpretable suffixes or structured self-reminders [689] into prompts, reducing the model's susceptibility to coercive inputs through lightweight, modular defenses.", + "bbox": [ + 503, + 213, + 923, + 460 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Structural manipulation approaches [690] neutralize adversarial intent through prompt rewriting. Spotlighting [691] injects source-attribute signals to counter indirect prompt injection, while inverse prompt engineering [692] repurposes attack data to generate task-specific defensive prompts under the principle of least privilege.", + "bbox": [ + 503, + 460, + 923, + 549 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Privacy-preserving prompt [693] design introduces formal guarantees through differential privacy. Approaches like DP-Prompt [694] and stochastic gradient masking [695] reduce information leakage from prompts without harming performance. 
Desensitization and directional control of incontext representations offer additional privacy protections during prompt construction. Prompt engineering [579, 696] also helps mitigate societal risks. Chain-of-thought prompting and guided templates reduce gender bias [697] in reasoning tasks, while prompt learning [698] improves toxicity detection and generation control [699, 700], often surpassing specialized models in efficiency and generalization.", + "bbox": [ + 503, + 549, + 923, + 723 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Finally, systematic prompt optimization methods [701, 702] aim to generalize prompt robustness across tasks and domains. Techniques like BATPrompt [703] and StraGo [704] use adversarial simulation and strategic decomposition to refine prompts iteratively, improving both resilience and effectiveness under variable inputs.", + "bbox": [ + 503, + 723, + 921, + 811 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "System-level Security Controls. System-level defenses [705] enhance LLM deployment by optimizing inference, enforcing alignment, isolating untrusted inputs, and securing the supply chain. Systems like Petals [706], Sarathi-Serve [707], and DistServe [708] restructure computation to improve throughput and latency, while TriForce [709], Medusa [710] MagicDec [711] accelerate generation via speculative decoding and structural compression. Parallel frameworks such as DeepSpeed-FastGen [712] and SpecExec [713] further boost", + "bbox": [ + 503, + 811, + 923, + 941 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "efficiency with minimal overhead.", + "bbox": [ + 71, + 53, + 313, + 66 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Runtime alignment methods [714] adapt model behavior through cross-model guidance or token-level reward modeling. Systems such as SelfDefend [715] and Gradient Cuff [716] detect unsafe generation by monitoring agreement across models or loss landscapes, while Spotlighting [691] inserts provenance signals to mitigate indirect prompt injection.", + "bbox": [ + 71, + 66, + 490, + 170 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Access isolation is achieved through policy enforcement [717] and system wrappers [688]. At the supply level, tools like MalHug [718] identify poisoned models, while system audits reveal sandbox and plugin vulnerabilities, highlighting the need for end-to-end secure deployment.", + "bbox": [ + 71, + 170, + 491, + 244 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "LLM-based guard models utilize lightweight LLMs like Llama Guard [330], Aegis Guard [719, 720], WildGuard [721], and ShieldGemma [722] to moderate both the input and output of the victim LLMs. However, they are purely classifiers. To solve this problem, the first reasoning-based guard model named GuardReasoner [723] is proposed to improve the performance, explainability, and generalization ability via learning to reason. 
It brings new opportunities for the safety of large-scale reasoning models [724].", + "bbox": [ + 71, + 244, + 490, + 377 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "6.1.3 Evaluation and Benchmarks in Deployment", + "text_level": 1, + "bbox": [ + 73, + 392, + 426, + 407 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "To assess the reliability and safety of LLMs after deployment, evaluation efforts focus on several key dimensions and risk types, as illustrated in Figure 10. These dimensions guide the design of systematic benchmarks and metrics tailored for real-world deployment settings.", + "bbox": [ + 71, + 411, + 491, + 486 + ], + "page_idx": 23 + }, + { + "type": "image", + "img_path": "images/7e5d6796694a8d1a706054c8a700440e2e26505bd752ab4e4efae519e5f05197.jpg", + "image_caption": [ + "Fig. 10: The overview of evaluation and benchmarks in single LLM's deployment phase." + ], + "image_footnote": [], + "bbox": [ + 76, + 502, + 488, + 652 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Robustness Evaluation. To systematically assess the relia-", + "bbox": [ + 73, + 705, + 488, + 720 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/5e09ecec36584d1cf538edd7e63fbbb1fa61ac0fa57f6f3644e5882b53355973.jpg", + "table_caption": [ + "TABLE 7: Summary of LLM robustness benchmarks at the deployment stage." + ], + "table_footnote": [], + "table_body": "
BenchmarkAdversarialNaturalJailbreakToxicity
JailbreakBench [306]
HarmBench [305]
JAMBench [725]
JailbreakEval [726]
Latent Jailbreak [727]
PromptRobust [728]
SelfPrompt [729]
Chen et al. [730]
Chu et al. [731]
AdvGLUE [732]
AdvGLUE++ [333]
NoiseLLM [733]
NEO-BENCH [734]
CompressionEval [735]
", + "bbox": [ + 75, + 766, + 488, + 939 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "bility of large language models (LLMs) after deployment, we categorize robustness evaluation into two broad types: adversarial robustness and natural robustness. Adversarial robustness focuses on evaluating how LLMs respond to malicious or adversarial inputs, such as jailbreak prompts, prompt injections, or red-teaming attacks. Natural robustness, on the other hand, assesses LLM behavior under nonmalicious but realistic distribution shifts, including typos, paraphrasing, novel word usage, or temporal drift. A summary of representative benchmarks categorized along these 4 dimensions is presented in Table 7.", + "bbox": [ + 501, + 53, + 921, + 213 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Adversarial Robustness: A range of benchmarks and frameworks have been proposed for adversarial robustness. JailbreakBench [306] provides a standardized evaluation suite for jailbreak attacks, containing 100 misuse behaviors and an evolving repository of adversarial prompts. HarmBench [305] proposes a comprehensive red-teaming evaluation framework that includes 510 harmful behaviors spanning diverse semantic and functional categories, supporting both text-only and multimodal inputs across 33 LLMs. JAMBench [725] targets the evaluation of moderation guardrails using 160 carefully constructed prompts across four major risk categories and introduces a cipher-character-based attack. JailbreakEval [726] offers a unified toolkit for jailbreak assessment with string-matching, classifier-based, and LLM-based evaluators. Latent Jailbreak [727] focuses on detecting embedded malicious intent in seemingly benign prompts and evaluates instruction-following robustness using a hierarchical annotation scheme. PromptRobust [728] benchmarks prompt-level robustness with character, word, sentence, and semantic-level perturbations across 13 datasets and 8 NLP tasks. 
SelfPrompt [729] enables autonomous robustness evaluation through knowledge-guided prompt generation and LLM-based self-assessment. Chu et al. [731] conduct a large-scale comparison of 17 jailbreak attacks on 8 LLMs and 160 forbidden prompts, proposing a unified taxonomy and benchmarking various defenses. Chen et al. [730] propose a multi-dimensional framework assessing jailbreak reliability over 13 LLMs and 1,525 prompts, integrating metrics such as attack success rate (ASR), toxicity, fluency, and grammatically. Zhang et al. [736] propose a novel definition and benchmark for LLM's content moderation based on a sensitive-semantic perspective.", + "bbox": [ + 501, + 213, + 921, + 694 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Natural Robustness: Several benchmarks focus on evaluating LLMs under realistic but benign input perturbations or distribution shifts. AdvGLUE [732] and AdvGLUE++ [333] extend the original GLUE benchmark [737] with semantically-preserving perturbations at logic, word, and sentence levels. NoiseLLM [733] presents a unified framework for evaluating slot-filling robustness under character-, word-, and sentence-level noise, including typos and paraphrases. NEO-BENCH [734] assesses robustness to temporal drift by introducing neologisms into tasks such as machine translation, classification, and question answering. CompressionEval [735] provides a prompt-free evaluation framework using lossless compression to assess generalization and robustness, comparing LLM performance on content before and after the model's knowledge cutoff. These benchmarks offer complementary perspectives for assessing LLM performance under both malicious and naturally", + "bbox": [ + 501, + 694, + 921, + 943 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "occurring input variations.", + "bbox": [ + 73, + 54, + 263, + 66 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Content Trustfulness and Fairness Evaluation. Beyond ro", + "bbox": [ + 73, + 66, + 490, + 82 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/494c56ce4c14d6ec67999ba68c4f9c1261aae972f1017e76671270ae0d772dfb.jpg", + "table_caption": [ + "TABLE 8: Summary of content trustfulness and fairness evaluation benchmarks for LLMs at deployment stage." + ], + "table_footnote": [], + "table_body": "
BenchmarkHallucinationFactualityToxicityBiasDiscrimination
HaluEval [738]
Med-HALT [739]
ANAH [740]
SelfCheckGPT [741]
DoLa [742]
Mundler et al. [743]
Elaraby et al. [744]
Ji et al. [745]
Zhang et al. [746]
Guo et al. [747]
RTP-LX [748]
ROBBIE [749]
CEB [750]
", + "bbox": [ + 75, + 126, + 491, + 263 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "bustness, a key dimension of deployment-stage evaluation concerns the trustfulness and fairness of LLM-generated content. This includes detecting and mitigating outputs that are factually incorrect (hallucinations), misleading (low factuality), harmful (toxic), or unfair (biased or discriminatory). We categorize existing benchmarks into five axes: hallucination, factuality, toxicity, bias, and discrimination, and summarize representative works in Table 8.", + "bbox": [ + 71, + 272, + 491, + 388 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Benchmarks in this space target either the accuracy of generated content or its alignment with human values. For hallucination and factuality evaluation, HaluEval [738] and MedHALT [739] provide reference-based hallucination annotations in general and medical domains, respectively, while ANAH [740] delivers fine-grained, human-annotated hallucination labels with correction spans. SelfCheckGPT [741] detects hallucinations via consistency checks across multiple generations, and DoLa [742] proposes a decoding strategy that contrasts internal layer activations to reduce factual errors. Other works such as Mundler et al. [743], Elaraby et al. [744], and Ji et al. [745] leverage taxonomic definitions or internal model signals to quantify or predict hallucination risk. Zhang et al. [746] introduce FEWL, a reference-free evaluation framework that uses agreement across reference LLMs to approximate hallucination likelihood.", + "bbox": [ + 71, + 388, + 491, + 621 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "In terms of toxicity detection, Guo et al. [747] show that role-playing prompts (persons) can elicit toxic behavior from ChatGPT, and RTP-LX [748] evaluates multilingual LLMs in detecting culturally sensitive harm. 
Both studies reveal that current LLMs remain vulnerable to subtle toxic or culturally biased outputs, especially in low-resource languages or when confronted with indirect harm.", + "bbox": [ + 71, + 621, + 491, + 723 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "For evaluating social bias and discrimination, ROBBIE [749] benchmarks LLMs across 12 demographic axes with template-based prompts and multiple toxicity and regard metrics, covering gender, race, religion, and intersections thereof. CEB [750] proposes a compositional taxonomy for fairness evaluation and introduces multiple new datasets spanning stereotyping, toxicity, and classification bias, supporting both direct and indirect evaluation modes.", + "bbox": [ + 71, + 723, + 491, + 840 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "These benchmarks collectively provide a multidimensional view of content trustfulness and fairness, enabling the systematic evaluation of LLMs beyond syntactic correctness or surface fluency. As safety-critical deployment scenarios become increasingly prevalent, such evaluation tools play a central role in ensuring the responsible use of LLMs.", + "bbox": [ + 71, + 840, + 491, + 926 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Data Privacy and Leakage Evaluation. Data privacy is", + "bbox": [ + 73, + 926, + 491, + 941 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/507c336b5a2d24ce18489c83891919090519cf1f20f6a7cceb030ba324f22d7d.jpg", + "table_caption": [ + "TABLE 9: Summary of privacy evaluation benchmarks for LLMs at the deployment stage." + ], + "table_footnote": [], + "table_body": "
BenchmarkPIIMIAEIACompliance
PrivLM-Bench [751]
LLM-PBE [752]
PrivAuditor [753]
Rossi et al. [754]
Whispered Tuning [755]
ProPILE [103]
PrivaCI-Bench [756]
Commercial Audit [757]
LessLeak-Bench [758]
SecureSQL [759]
DecodingTrust [333]
", + "bbox": [ + 519, + 92, + 903, + 234 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "a critical dimension in evaluating the trustworthiness of LLMs at deployment. Table 9 summarizes representative benchmarks that assess privacy risks along four axes: personally identifiable information (PII) leakage, membership inference attacks (MIA), embedding inversion attacks (EIA), and regulatory or contextual compliance.", + "bbox": [ + 503, + 257, + 921, + 345 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "PrivLM-Bench [751] and LLM-PBE [752] offer comprehensive multi-level evaluations spanning all three major attack types. PrivAuditor [753] and Rossi et al. [754] focus on adaptation-stage vulnerabilities across a variety of finetuning techniques. Whispered Tuning [755] proposes a differential privacy-based training scheme to reduce leakage, while ProPILE [103] tests whether LLMs can reconstruct sensitive information from prompts related to known users.", + "bbox": [ + 503, + 345, + 921, + 460 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "PrivaCI-Bench [756] and Commercial Audit [757] emphasize regulatory compliance, evaluating model behavior against privacy expectations and legal frameworks such as GDPR and the EU AI Act. SecureSQL [759] examines leakage in structured query generation, and LessLeak-Bench [758] reveals code-specific leakage across software engineering benchmarks. 
Finally, DecodingTrust [333] includes privacy as part of a broader trustworthiness suite, auditing GPT models across multiple risk dimensions.", + "bbox": [ + 503, + 460, + 921, + 593 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Together, these benchmarks provide a foundation for assessing LLM privacy risks across diverse modalities, attack surfaces, and deployment scenarios.", + "bbox": [ + 503, + 593, + 921, + 636 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Multi-modal Safety Evaluations As multimodal large language models (MLLMs) become increasingly integrated into real-world applications, ensuring their safety under diverse input conditions is essential. A growing number of studies have proposed evaluation benchmarks and frameworks to assess MLLM vulnerabilities across multiple dimensions [760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780, 781, 782].", + "bbox": [ + 503, + 636, + 921, + 752 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Jailbreak evaluation has received significant attention, with benchmarks such as MM-SafetyBench [760] and Jailbreakv-28k [761] targeting harmful instruction-following behaviors. MMJ-Bench [762] and Retention Score [763] further extend jailbreak assessment to include visual robustness and long-term safety retention. For hallucination, several works diagnose MLLM failures arising from inconsistencies between visual inputs and generated text, including HallusionBench [764], POPE [765], and Bingo [766]. 
SIUO [767] complements this direction by evaluating cross-modality consistency under seemingly benign inputs.", + "bbox": [ + 501, + 752, + 921, + 912 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Robustness under adversarial visual corruption is assessed in MVTamperBench [768] and B-AviBench [769],", + "bbox": [ + 503, + 912, + 921, + 941 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "which introduce perturbed or misleading visual stimuli to test model stability. Meanwhile, fairness and social bias have been evaluated through VIVA [770], GenderBiasVL [771], FACET [772], FairDeDup [773], CounterBias [774], PAIRS [775], DeAR [776], and MMBias [777], covering gender, racial, and intersectional dimensions using parallel image sets, counterfactual probing, and real-world dataset imbalances.", + "bbox": [ + 71, + 51, + 491, + 167 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "To unify these evaluation directions, several comprehensive frameworks have emerged. MultiTrust [778] and SPAVL [779] aim to benchmark MLLMs across diverse safety criteria, including robustness, fairness, and harmfulness. 
Q-Eval-100K [780] complements these efforts by focusing on visual generation quality and alignment under instruction-following settings.", + "bbox": [ + 71, + 170, + 491, + 273 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Collectively, these benchmarks highlight the unique challenges posed by multimodal interactions and the growing need for holistic, scalable safety evaluations tailored to MLLMs.", + "bbox": [ + 71, + 273, + 491, + 333 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "6.2 Single-agent Safety", + "text_level": 1, + "bbox": [ + 73, + 363, + 261, + 378 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "In this section, we focus on security issues related to a single agent. We first define an agent as an interactive entity that uses an LLM as the core for reasoning, decision-making, and reflection while integrating memory, tools, and the environment as capability-enhancing components. Beyond the deployment risks associated with the LLM core, we introduce the security issues arising from these three additional modules. Specifically, for tools (Section 6.2.2) and memory (Section 6.2.3), we summarize existing work from both attack (Section 6.2.4) and defense (Section 6.2.5) perspectives to identify technical paradigms. For the environment (Section 6.2.6), we explore unique security challenges from the perspective of various agent-interaction settings. We demonstrate an overview of agent safety in Figure 12.", + "bbox": [ + 71, + 387, + 491, + 592 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "6.2.1 Definition of Agent", + "text_level": 1, + "bbox": [ + 73, + 613, + 259, + 628 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "LLM-driven agent refers to an AI system capable of operating independently or with limited human oversight, where a sophisticated language model [6, 783, 784, 785] serves as the foundational intelligence for processing inputs, executing tasks, and engaging in interactions. 
By leveraging advanced natural language understanding and generation, such agents [29, 786, 787, 788, 789] can analyze information, resolve queries, and adapt to user or environmental inputs [790, 791, 792]. To extend their functionality, they frequently incorporate supplementary mechanisms—such as data storage modules [23, 793, 794, 795], external software interfaces [790, 796, 797], or strategic reasoning frameworks [798]—allowing them to transcend basic text production. This adaptability makes them valuable for diverse implementations, including interactive dialogue systems [799], workflow optimization [800, 801, 802, 803], and complex decision-making scenarios [804]. In this study, we focus on deconstructing agent safety into three critical dimensions: tool utilization, memory management, and environment-specific security concerns. We demonstrate the components and structures of agent systems in Figure 11.", + "bbox": [ + 71, + 635, + 493, + 944 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "6.2.2 Tool Safety", + "text_level": 1, + "bbox": [ + 504, + 53, + 638, + 68 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Some works enable LLM agents to learn how to use tools by generating datasets and fine-tuning the model for API usage [25, 805]. Specifically, tools can be implemented in various forms, including but not limited to code-based API functions (e.g., search engine [806] and calculator), embodied intelligence like robotic arms [807], and more. A tool serves as a bidirectional medium: on one hand, it allows the agent to map internal decisions into actions within the interactive environment; on the other hand, it also acts as a means for the agent to collect information from the external world. Given the pivotal role of tools in agent components, the related security issues are worth exploring [74]. For example, in the field of web security, Fang et al. 
[808, 809] investigate how autonomous agents, when equipped with appropriate tools, can independently compromise websites and exploit one-day vulnerabilities in real-world systems without human intervention. Next, we will summarize and discuss existing research from attack perspectives and figure out the lack of tool invocation defense in current research.", + "bbox": [ + 503, + 75, + 924, + 354 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Attacks. Based on the target of the attack, safety-related attacks involving tools can be categorized into Tool-aided Attacks and Tool-targeted Attacks. The former refers to attackers utilizing agents equipped with tools to execute attacks that LLMs cannot independently assist with, such as leveraging agents with web access and code execution capabilities to facilitate cyberattacks. The latter involves attackers targeting the tool invocation process itself, attempting to manipulate or induce tool selection for malicious purposes through various attack methods. However, from the perspective of the technical stack of attacks, the two can be unified. We have identified new applications of traditional LLM attack methods in tool safety, as well as novel attack paradigms that have emerged due to the unique characteristics of tools.", + "bbox": [ + 503, + 354, + 924, + 574 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Jailbreak. Similar to jailbreak methods in LLM safety, agent jailbreak also bypasses the agent's built-in safety mechanisms through specific prompts to elicit malicious responses. However, in the agent scenario, the malicious behaviors it aims to induce are different. Specifically, Cheng et al. [810] manually craft jailbreak prompts to extract personal information from the training data of code-generation agents. In contrast, Fu et al. 
[811] and Imprompter [812] both employ gradient-based optimization like GCG [260] to automatically generate input prompts or images that manipulate agents into leveraging tools for privacy breaches in dialogues or executing harmful actions on user resources.", + "bbox": [ + 503, + 575, + 924, + 751 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Injection. This type of attack can be summarized into two forms of injection: Prompt Injection (similar to LLM safety vulnerabilities) where malicious instructions are embedded in input data, exploiting the difficulty LLMs face in distinguishing between instructions and data. Another form is Tool Injection where malicious tools are injected to enable further exploitation, such as using the tool to execute malicious actions. For example, BreakingAgents [813] utilizes human-crafted prompt injections to execute malfunction attacks, causing agents to engage in repetitive or irrelevant actions, with additional exploration into the propagation of such attacks within Multi-Agent Systems (MAS). ToolCommander [814] is the second type. It proposes a two-stage", + "bbox": [ + 503, + 752, + 924, + 944 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 25 + }, + { + "type": "image", + "img_path": "images/cf762587d56c382c4c037d7bf5aac6c071b7d0e9976abb0ab003388647d5eb60.jpg", + "image_caption": [ + "Fig. 11: The overview of LLM-based single-agent and multi-agent systems." 
+ ], + "image_footnote": [], + "bbox": [ + 75, + 51, + 923, + 351 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "attack strategy: first, injecting malicious tools to steal user queries, and subsequently manipulating tool selection using the stolen data, thereby achieving privacy theft and denial-of-service attacks.", + "bbox": [ + 71, + 392, + 490, + 450 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Backdoor. Backdoor attacks also find utility in the context of agent safety, but unlike LLMs, LLM agents develop diverse verbal reasoning traces through continuous environmental interactions, broadening potential backdoor attack vectors. Yang et al. [815] define two types of backdoor attacks, targeting either the final returned results or the intermediate processes of the attacking agent, and implement the above variations of agent backdoor attacks on two typical agent tasks, including web shopping and tool utilization. Furthermore, DemonAgent [816] decomposes a backdoor into multiple sub-backdoor fragments to poison the agent's tools. Beyond intentional guidance, studies such as BadAgent [817] highlight that backdoor attacks can inadvertently prompt agents to misuse tools for malicious purposes.", + "bbox": [ + 71, + 454, + 490, + 672 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Manipulation. This type of attack refers to directly or indirectly manipulating or altering the tool's returned content to leak sensitive information or carry out malicious actions. AUTOCMD [818] employs a separate LLM, trained on tool-calling datasets and fine-tuned with target-specific examples, to generate and replicate legitimate commands for extracting sensitive information from tools. Meanwhile, Zhao et al. 
[819] manipulate third-party API outputs by injecting malicious content or omitting critical information, ultimately causing erroneous or biased system behaviors.", + "bbox": [ + 71, + 676, + 488, + 821 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Defenses. Compared to attacks on agent tools, defense mechanisms for secure tool invocation have been less studied. Specifically, AgentGuard [820] employs LLM orchestrators to automatically detect unsafe tool-use workflows and produce safety constraints for secure tool utilization. PrivacyAsst [821] proposes an encryption-based solution by integrating an encryption scheme into the tool using LLM agents to safeguard user privacy and align them", + "bbox": [ + 71, + 825, + 491, + 944 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "with computational security standards. In addition, some works enhance the security of agent systems by leveraging tool invocation, GuardAgent [822] pioneers an approach to verify target agents' trustworthiness by executing guardrail code through API calls during task plan implementation.", + "bbox": [ + 503, + 392, + 921, + 467 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "6.2.3 Memory Safety", + "text_level": 1, + "bbox": [ + 504, + 481, + 666, + 496 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "The memory mechanism in LLM agents enables them to retain historical behaviors, thereby enhancing future decision-making capabilities. Typically, agent memory can be categorized into long-term and short-term memory systems. The long-term memory module commonly employs Retrieval-Augmented Generation (RAG) [823, 824] technology to facilitate precise information retrieval, while the short-term memory stores real-time data to support immediate conversational contexts and task execution. 
While these memory modules significantly improve agent functionality, they simultaneously introduce potential security vulnerabilities, making the system susceptible to malicious attacks.", + "bbox": [ + 501, + 500, + 921, + 675 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "6.2.4 Attack", + "text_level": 1, + "bbox": [ + 504, + 689, + 604, + 703 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Follow the trustworthy issues in [74], we categorize attacks related to memory into three types: Memory Poisoning, Privacy Leakage, and Memory Misuse.", + "bbox": [ + 503, + 708, + 921, + 752 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "(I) Memory Poisoning refers to adversarial attacks where malicious data is injected into an agent's long-term memory [313, 825, 826, 827, 828, 829]. When the agent retrieves and utilizes such corrupted memory, it may produce erroneous outputs, misleading responses, or even hazardous actions. For example, PoisonedRAG framework [827] employs a dual optimization approach, simultaneously manipulating both the retrieval and generation pipelines to systematically poison the agent's memory system. AgentPoison [826] introduces an advanced backdoor attack methodology that optimizes trigger patterns and seamlessly integrates them into query formulations, significantly elevating the likelihood of malicious sample retrieval", + "bbox": [ + 501, + 752, + 921, + 941 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/a2b149c02628f0cb46be90a88c408bf263d347f8b42ba68b6b83ded7364f1a70.jpg", + "image_caption": [ + "Fig. 12: The overview of the safety of LLM-based agent systems." 
+ ], + "image_footnote": [], + "bbox": [ + 86, + 60, + 906, + 371 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "while maintaining stealth. (II) Privacy Leakage occurs when attackers exploit the interface between an agent and its long-term memory to extract stored sensitive data [520, 605, 607, 830, 831]. Such breaches may expose user information to malicious third parties, posing significant real-world risks. (II) Memory Misuse refers to the deliberate construction of multi-turn query sequences that systematically circumvent safety protocols by exploiting the retention properties of agent short-term memory [752, 832, 833, 834, 835, 836]. This attack vector enables progressive erosion of defensive measures through iterative interaction patterns.", + "bbox": [ + 71, + 419, + 491, + 580 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "6.2.5 Defense", + "text_level": 1, + "bbox": [ + 73, + 590, + 187, + 604 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "To counter these attacks, various defense approaches have been developed to enhance the robustness of memory systems [520, 835, 837, 838, 839]. (I) Detection Detection mechanisms primarily focus on identifying and eliminating malicious content retrieved from long-term memory systems [835, 838, 839?]. (II) Prompt Modification involves strategically rewriting user queries before processing by the agent to enhance response safety [520, 835]. (III) Output Intervention involves real-time monitoring and modification of agent responses prior to delivery to ensure safety and accuracy [825, 840].", + "bbox": [ + 71, + 607, + 490, + 768 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "6.2.6 Environment Safety", + "text_level": 1, + "bbox": [ + 73, + 779, + 266, + 794 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Agents operate within dynamic and heterogeneous environments, spanning physical and digital domains [841, 842, 843]. 
Their interaction with these environments is a multistep process [844, 845]. First, agents engage in perception, gathering data from sources like sensors in a physical setup or digital platforms [806]. This perceived data is then analyzed using various algorithms and reasoning mechanisms to identify patterns and potential actions [846]. Based on this analysis, agents take action, which can either directly influence the environment, like an autonomous vehicle making", + "bbox": [ + 71, + 796, + 490, + 941 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "a lane change [847], or modify their own internal state, such as a software agent updating its knowledge base [848].", + "bbox": [ + 503, + 419, + 921, + 450 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "However, this interaction is plagued by trustworthiness challenges. There are security risks in every process of interaction with the environment [849]. Agent roles and environmental constraints contribute to risks such as autonomous driving errors [850] and network disruptions [806, 851]. Given the diverse dynamic scenarios and related issues [849, 852, 853], the existing solutions are fragmented and lack a systematic framework. Thus, we will explore trustworthiness and security aspects by categorizing relevant papers according to whether they focus on ensuring safety in the perception, analysis, or action phase of the agent-environment interaction, as illustrated in Figure 10.", + "bbox": [ + 501, + 452, + 921, + 628 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Perception. The perception phase serves as the foundational layer of agent-environment interaction, where agents acquire raw data to interpret their surroundings. However, this phase is inherently vulnerable to risks such as data poisoning, environmental noise, and biased observations. 
Hudson [841] converts real-time sensory inputs into natural language representations augmented with security validation protocols, employing causal analysis techniques to improve reliability during adversarial perception scenarios. ChatScene [847] develops safety-oriented simulation environments for autonomous systems by converting linguistic commands into executable code compatible with CARLA's simulation architecture. Chen et al. [854] systematically categorize perceptual vulnerabilities in financial AI systems, identifying three primary risk categories: synthetic data generation errors, temporal inconsistency challenges, and susceptibility to engineered input manipulations.", + "bbox": [ + 501, + 631, + 921, + 880 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Reasoning. The reasoning phase transforms raw perceptual data into actionable insights through decision-making models, and knowledge-based inference. This stage is critical to ensure agents act appropriately in dynamic environments,", + "bbox": [ + 503, + 883, + 923, + 941 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 32, + 421, + 44 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/9deb0bb23bd9a7df4575cdd26e5b8aac051c2140a99729831eb7c59ed428f59b.jpg", + "image_caption": [ + "Fig. 13: The overview of agent and environment interactions." + ], + "image_footnote": [], + "bbox": [ + 76, + 56, + 488, + 204 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "but introduces unique trustworthiness challenges. Yang et al. [846] develop a temporal safety verification framework using formal logic systems, implementing dual mechanisms for auditing the compliance of safety protocols and filtration of hazardous decisions to meet the requirements of industrial robotics. 
Agents4PLC [855] establishes an industrial control programming framework that combines automated code synthesis with formal verification processes, integrating RAG [235] and COT [343] to ensure operational integrity. Xiang et al. [822] propose medical AI systems that employ semantic reasoning engines for confidential data protection. Park et al. [845] demonstrate improved threat detection capabilities through simulated organizational communication patterns in anomaly identification systems.", + "bbox": [ + 71, + 270, + 491, + 474 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Action. The action phase represents the culmination of agent-environment interaction, where agents execute decisions to influence their surroundings or update internal states. Trustworthiness at this stage hinges on ensuring that actions are safe, precise, and aligned with intended objectives. Fang et al. [851] reveal the capacity of autonomous systems to exploit digital infrastructure weaknesses through adaptive penetration testing, prompting the development of specialized evaluation frameworks for web agents. Furthermore, researchers develop frameworks to evaluate the truthfulness of web agents. Polaris [856] implements distributed AI architectures to enhance fault tolerance and response accuracy of healthcare interaction systems. La et al. [857] employ linguistic evolution models to simulate adaptive content generation patterns that circumvent automated moderation systems, providing insights for regulatory mechanism improvements.", + "bbox": [ + 71, + 476, + 491, + 724 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "6.3 Multi-agent Safety", + "text_level": 1, + "bbox": [ + 73, + 746, + 250, + 762 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "In the previous section, we explored security issues in a single agent setting and this section expands the discussion to multi-agent systems (MAS) [58, 71, 858, 859, 860, 861]. 
Since a single agent has limited problem-solving capabilities and a relatively narrow perspective, it struggles to conduct a comprehensive analysis of complex problems. In contrast, in MAS, agents can interact through various mechanisms, such as cooperation, competition, and debate, enabling them to solve complex problems more efficiently and effectively [862]. However, these interactions also introduce more complex and diverse security challenges [863]. Consequently, compared to single-agent systems, MASs face more severe", + "bbox": [ + 71, + 767, + 490, + 941 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "and intricate security risks [864]. Similarly, we summarize and discuss existing research from both attack and defense perspectives.", + "bbox": [ + 503, + 53, + 921, + 98 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "6.3.1 Attack", + "text_level": 1, + "bbox": [ + 504, + 108, + 604, + 122 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "In MAS, security threats primarily stem from the propagation of harmful information, hallucinations, and biases through agent interactions, as well as the coordinated planning and optimization of attacks to target security agents within the system. These threats can arise spontaneously through the unintended amplification of misinformation or be deliberately orchestrated by malicious agents. Attack strategies in MAS often integrate multiple traditional techniques, such as prompt injection, jailbreak, and adversarial attacks, while also exploiting emergent properties of agent communication and collaboration. This multi-faceted nature makes MAS attacks more covert, adaptive, and challenging to detect and mitigate. Moreover, the dynamic and autonomous nature of agents allows adversaries to refine their attacks in real-time, further complicating defense mechanisms. 
Below, we summarize the key research related to these threats.", + "bbox": [ + 501, + 126, + 923, + 375 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Transmissive Attack. It spreads within the MAS like a virus, propagating dangerous and harmful information, including covert malicious content, continuously attacking and compromising the agents in the system. Agent Smith [829] uses adversarial attack techniques, harmful images are generated—appearing benign on the surface but embedding malicious information. These images propagate within the MAS, causing agents to be compromised and posing significant security risks. CORBA [865] introduces Contagious Recursive Blocking Attacks, which exhibit transmissibility across any topological network and can continuously drain computational resources. Lee et al. [600] introduce Prompt Infection in MAS, including data theft, scams, misinformation, and system-wide disruption, which spreads silently. Similarly, Tan et al. [866] use multimodal malicious prompts to infect other secure agents, compromising their security.", + "bbox": [ + 503, + 375, + 923, + 608 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Interference Attack. This attack focuses on how it interferes with and disrupts interactions within the MAS, emphasizing communication disruption and misinformation, which affect information transmission within the MAS and lead to a decline in its defensive capability. NetSafe [867] conducts extensive experiments, analyzing and revealing their structural dependencies and adversarial impacts. At the same time, Huang et al. [868] study how the resilience of MAS varies between different downstream tasks, system structures, and error types; Agent-in-the-Middle [869] manipulates and intercepts information in agent interactions through intermediary agents, disrupting the communication mechanism. 
The experiment validates the harm caused by the interruption of interactions by intermediary agents through a comparison of MAS with different topological structures.", + "bbox": [ + 503, + 608, + 923, + 839 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Strategic Attack. Strategic attack involves collaboration between agents and strategic optimization of attack methods, aiming to emphasize the cooperation and long-term impact of the attack, making it increasingly dangerous and more destructive. Evil Geniuses [870] modifies system roles, where these roles collaborate to generate malicious prompts. By simulating adversarial attacks and defenses,", + "bbox": [ + 503, + 840, + 921, + 941 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 32, + 421, + 44 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "they optimize and evaluate each round of attack behavior, making the attacks increasingly dangerous to target other agents. Amayuelas et al. [871] use adversarial attack techniques to enable harmful agents in the multi-agent system to collaborate in debates to persuade other secure agents. These malicious agents may exploit superior knowledge, larger model sizes, or greater persuasion power to gain an unfair advantage. Ju et al. 
[872] form a multi-agent community using a two-stage attack method: persuasive injection and knowledge manipulation injection, to induce agents to spread counterfactual and harmful knowledge.", + "bbox": [ + 71, + 53, + 491, + 214 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "6.3.2 Defense", + "text_level": 1, + "bbox": [ + 73, + 224, + 187, + 239 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "In response to the various attack methods mentioned above in multi-agent systems, many effective defense strategies have emerged that can be applied to MAS. Currently, many studies focus on forming agent groups to collaborate in joint defense and designing specific defense mechanisms, such as multi-round or multi-layer checks and filtering, to ensure the safety of the responses output by the MAS. Alternatively, defense can be achieved by identifying harmful agents through the propagation of malicious information and eliminating malicious sources.", + "bbox": [ + 71, + 243, + 490, + 387 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Adversarial Defense. This type of defense focuses on attack-defense confrontation, leveraging this adversarial mechanism to develop more effective defense methods or mechanisms to enhance the security of the MAS. LLAMOS [873] employs adversarial defense techniques, where defensive agents and attacking agents engage in counterinteractions, with neither fully defeating the other, thereby enhancing the robustness of the defense and improving the MAS's overall defensive capability. AutoDefense [874] proposes that agents collaborate to complete defense tasks through adversarial prompt filtering, primarily focusing on filtering harmful prompt information from LLMs. 
In addition to using adversarial techniques for defense, defense can also be achieved by forming a multi-agent group to engage in debates.", + "bbox": [ + 71, + 388, + 491, + 604 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Consensus Defense. To better leverage the advantages of MAS, Consensus Defense utilizes agent collaboration and consensus building for defense, employing voting, debates, and evidence-based reasoning mechanisms to establish a defense system and enhance the security of the MAS. Chern et al. [875] propose that toxicity can be reduced through multi-agent debates, and the widespread use of multi-agent interactions can lead to marginal improvements. Similarly, BlockAgent [876] proposes a Proof-of-Thought consensus mechanism that combines stake-based miner designation with multi-round debate-style voting, enabling BlockAgents to facilitate multi-agent collaboration through a structured workflow. Audit-LLM [877] proposes a pair-wise Evidence-based Multi-agent Debate mechanism, designed to defend against hallucinations by forming a MAS to detect internal threats. This approach is divided into three components: task decomposition, tool construction, and the final execution of the MAS, ultimately reaching consensus through reasoning.", + "bbox": [ + 71, + 607, + 491, + 883 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Structural Defense. Structural Defense treats the MAS as a network structure for planning defense methods, using graph analysis techniques to detect anomalies and resist attacks while incorporating knowledge from other domains", + "bbox": [ + 71, + 883, + 491, + 944 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "to enrich defense strategies in MAS. 
G-Safeguard [878] compares agents in MAS with various topological structures to nodes in a graph, using Graph Neural Networks (GNN) [879, 880] to detect anomalies in the agents' dialogue graphs and counter adversarial attacks and misinformation within the MAS.", + "bbox": [ + 503, + 53, + 924, + 140 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "6.4 Agent Communication Safety", + "text_level": 1, + "bbox": [ + 504, + 154, + 764, + 169 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "As Large Language Model (LLM)-based Agents evolve from isolated entities into interconnected MAS, the mechanisms governing communication between Agents, and their interactions with external environments and tools, have become increasingly critical. Agents exchange information and collaborate through message passing, tool invocation, and environmental interactions; these mechanisms, while essential to system functionality, also expose significant attack surfaces. Early methods [881, 882, 883, 884, 885, 886, 887] of Agent interaction often relied on ad-hoc approaches, such as shared memory [888], API calls [889] or unstructured function calls [890], leading to fragmented systems lacking unified security considerations. To address this challenge and enhance interoperability, standardized communication protocols have emerged. Examples include Anthropic's Model Context Protocol (MCP) [891] for Agent-tool interactions, Google's Agent2Agent (A2A) [892] for enterprise-level Agent collaboration, and the Agent Network Protocol (ANP) [893] for open network interoperability, along with other commonly used protocols [894, 895, 896, 897, 898, 899, 900, 901, 902, 903, 904]. 
However, the open design and dynamic nature of these communication mechanisms, coupled with the autonomy of the Agent, has exposed new vulnerabilities while enhancing functionality.", + "bbox": [ + 501, + 172, + 924, + 523 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "6.4.1 Attack", + "text_level": 1, + "bbox": [ + 504, + 531, + 604, + 545 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "The interconnected nature of MAS, facilitated by numerous communication channels, creates a multifaceted attack surface. While individual Large Language Models (LLMs) possess inherent vulnerabilities, the interactions and communications among Agents introduce novel threats that exploit the system's collaborative dynamics. These threats target various components, including communication channels, content interpretation, and underlying protocols, with examples such as Shadowing Attacks, Naming Attacks, Context Poisoning, and Rug Pulls.", + "bbox": [ + 501, + 547, + 924, + 694 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Attacks Communication Channels. These attacks directly disrupt the transmission and routing of messages in the system, affecting both inter-Agent communications and interactions with external endpoints. For instance, Agent-in-the-Middle (AiTM) attacks [869] specifically target the core communication mechanisms of LLM-MAS. By intercepting and manipulating messages between Agents, these attacks can cause Agents to perform unintended actions, thereby compromising the entire system. Such attacks underscore the critical security vulnerabilities arising from the communication-dependent nature of Agent collaboration. 
Furthermore, attacks targeting communication channels and transmission processes, such as communication perturbation [905], involve adversaries injecting noise into messages in transit [906] or masquerading as legitimate sources [907], thereby compromising both the efficiency and security of Agent collaboration.", + "bbox": [ + 503, + 694, + 924, + 941 + ], + "page_idx": 29 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Attacks Content. These attacks target the content of messages themselves, leveraging the mechanisms by which Agents process and interpret received information. For example, Prompt Injection involves embedding malicious instructions into data or content that Agents retrieve or receive through communication channels, thereby manipulating the Agent's behavior or decision-making processes. This technique is discussed in several works, such as [600] and [543]. Additionally, [908] explores indirect Prompt Injection within tool-based scenarios, highlighting the varied strategies employed in complex environments.", + "bbox": [ + 71, + 53, + 491, + 212 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Attacks Exploiting Multi-Agent Dynamics. These attacks leverage the interconnected structure, interaction patterns, or collective behavior of communication-driven Multi-Agent Systems (MAS) to amplify their impact or achieve strategic objectives. Contagious attacks (propagation) initiate malicious behavior on a single agent and spread it across the entire network via inter-agent communication [829, 865]. 
Additionally, malicious agents can coordinate through collective communication to achieve harmful goals, such as replicating malicious instructions across the network by sending replication code or commands, thereby leading to the sharing of legitimate communication keys or identity information with other malicious entities [909].", + "bbox": [ + 76, + 214, + 491, + 402 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "6.4.2 Defense", + "text_level": 1, + "bbox": [ + 73, + 415, + 187, + 428 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "To tackle threats to Agent communication, research proposes a multi-layered defense strategy addressing key points across the communication pipeline, from infrastructure to Agent-level processing. These defenses aim to prevent, detect, or mitigate attacks on channels, content, infrastructure, dynamics, and environmental factors. The strategies integrate into infrastructure and protocol design, individual Agents' message processing, and the collaborative and learning mechanisms of the MAS.", + "bbox": [ + 71, + 431, + 490, + 563 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Protocol Defenses. Protecting the foundation of Agent communication. This includes adopting standardized protocols with built-in security features (encryption, integrity checks, authentication) To counter Agent communication threats, research proposes multi-layered defense strategies targeting different points in the communication pipeline, from the underlying infrastructure to Agent-level message processing. Effective defenses aim to prevent, detect, or mitigate attacks on communication channels, content, infrastructure, such as MCP [891], A2A [892], ANP [893] standards. Establishing managed registries and identity systems for Agent and Tool/Service registration and identity management. Enforcing strong Agent identity verification and access control policies, including JIT credential provisioning. 
Implementing mechanisms to enforce communication dynamics, and environmental impacts.", + "bbox": [ + 71, + 564, + 491, + 796 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Content Defense. These defenses operate at the agent level, focusing on how agents process received messages and content. This includes input modification and filtering, which preprocess incoming content to neutralize adversarial elements. Agents also employ active defense mechanisms, such as reliability estimation, to assess the trustworthiness of messages based on local context, thereby mitigating the impact of untrusted information. For example, [910] proposed an active defense strategy that utilizes a reliability estimator to judge the credibility of received messages and", + "bbox": [ + 71, + 797, + 491, + 944 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "employs a decomposable message aggregation policy network to reduce the influence of unreliable messages on the final decision.", + "bbox": [ + 503, + 53, + 923, + 97 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "6.5 Agent Safety Evaluation", + "text_level": 1, + "bbox": [ + 504, + 119, + 725, + 135 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Currently, there is already a substantial body of work evaluating the performance of LLM-based agent systems on different tasks [911, 912, 913, 914, 915]. In this section, we focus on benchmarks designed to assess the security of agents. Broadly speaking, these benchmarks include those that construct datasets and those that use other agents to set up sandbox environments for evaluation, each with distinct assessment priorities and specific scenarios for agent security [314, 916, 917, 918, 919].", + "bbox": [ + 501, + 138, + 923, + 272 + ], + "page_idx": 30 + }, + { + "type": "table", + "img_path": "images/2fa9a8de989beb0f98e2c807f841f4935386a53ffedb159b40fad075d49e0a82.jpg", + "table_caption": [ + "TABLE 10: Benchmarks for agent safety." 
+ ], + "table_footnote": [], + "table_body": "
BenchmarkDynamicLLM asEvaluatorEvaluation Focus
InjectAgent [920]Prompt Injection
AgentDojo [849]Prompt Injection
AgentBackdoorEval [816]Backdoor
RiskAwareBench [921]Embodied Agent
RedCode [916]Coding Agent
S-Eval [917]General
Bells [918]General
AgentSafetyBench [922]General
AgentSecurityBench [?]General
AgentHarm [923]General
R-Judge [314]General
ToolSword [924]Tool
PrivacyLens [919]Privacy
ToolEmu [925]Tool
HAIEcosystem [926]General
SafeAgentBench [927]General
JailJudge [928]Jailbreak
", + "bbox": [ + 506, + 303, + 923, + 510 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "6.5.1 Attack-Specific Benchmarks", + "text_level": 1, + "bbox": [ + 504, + 537, + 756, + 551 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "This type of benchmark focuses on testing the security of an agent when facing specific types of attacks, such as Prompt Injection [600, 929], Backdoor [817, 930, 931], and Jailbreak [874, 932]. Specifically, InjectAgent [920] evaluates LLM agents' vulnerability to indirect prompt injection attacks, measuring behavior safety when tool-integrated agents process malicious instructions embedded in external content, with hacking prompts as an enhancement. A similar work is AgentDojo [849], a dynamic, extensible evaluation framework for assessing prompt injection attacks and defenses in LLM agents by simulating realistic tasks (e.g., email management, banking) with stateful environments and multi-tool interactions under adversarial conditions. As for backdoor attacks, AgentBackdoorEval [816] includes five real-world domains (including Banking-Finance, Medical, and Social Media) with automatically generated prompts, simulated tools, and tailored backdoor triggers to assess attack stealth and effectiveness. Besides, JailJudge [928] introduces a comprehensive jailbreak evaluation benchmark featuring a voting JailJudge MultiAgent, a comprehensive JailJudgeTrain dataset, and a trained Jailjudge Guard.", + "bbox": [ + 501, + 556, + 923, + 864 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "6.5.2 Module-Specific Benchmarks", + "text_level": 1, + "bbox": [ + 504, + 878, + 764, + 893 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Currently, these benchmarks for evaluating the security of a specific module in an agent focus on the invocation of tools [933, 934, 935, 936]. 
For example, ToolSowrd [924] evaluates", + "bbox": [ + 503, + 898, + 923, + 944 + ], + "page_idx": 30 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 32, + 421, + 44 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 906, + 32, + 919, + 42 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "LLM safety in tool learning across three stages (input, execution, output) by designing six adversarial scenarios (e.g., malicious queries, noisy tool misdirection, harmful feedback). ToolEmu [925] employs an LM-emulated sandbox to simulate diverse high-stakes tool executions and scenarios, leveraging GPT-4 for both tool emulation and automatic safety/helpfulness evaluations.", + "bbox": [ + 71, + 53, + 491, + 157 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "6.5.3 General Benchmarks", + "text_level": 1, + "bbox": [ + 73, + 171, + 276, + 186 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "In addition to the previously mentioned benchmarks that focus on a specific aspect of agent security, some efforts have developed more comprehensive and holistic evaluation frameworks, taking into account diverse scenarios, different agents, and various offensive and defensive techniques. For instance, AgentSafetyBench [922] assesses LLM agent safety through 2,000 test cases across 349 interactive environments, covering 8 risk categories (e.g., data leaks, physical harm) and 10 failure modes (e.g., incorrect tool calls, risk unawareness), with automated scoring via a fine-tuned model. Similarly, AgentSecurityBench [?] is a comprehensive framework that formalizes and evaluates attacks (e.g., Direct/Indirect Prompt Injection, Memory Poisoning) and defenses across 10 scenarios, 10 agents, and 13 LLM backbones, using 7 evaluation metrics. 
SafeAgentBench [927] evaluates embodied LLM agents' safety awareness with 750 diverse tasks (detailed, abstract, long-horizon) in SafeAgentEnv simulation environment, leveraging GPT-4 for task generation and dual evaluators (execution-based and semantic). HAIEcosystem [926] evaluates safety through multi-turn interactions between human users (benign/malicious) and AI agents across 132 scenarios, using modular sandbox environment and LLM-based dynamic risk measurement. AgentHarm [923] tests agent robustness by evaluating compliance with 110 explicitly malicious multi-step tasks across 11 harm categories, using synthetic tools and fine-grained grading rubrics. Different form previous benchmarks, RiskAwareBench [921] focuses on embodied agents, evaluating physical risk awareness via four modules: safety tip generation, risky scene generation, plan generation, and automated evaluation.", + "bbox": [ + 71, + 191, + 491, + 643 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "6.5.4 LLM Deployment Roadmap", + "text_level": 1, + "bbox": [ + 73, + 659, + 318, + 674 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "In the deployment of LLMs under frozen parameters, the security landscape has evolved through a tightly coupled dynamic among attacks, defenses, and evaluation mechanisms.", + "bbox": [ + 71, + 679, + 490, + 736 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Initially, black-box attacks leveraged the generative capabilities of LLMs themselves to optimize adversarial prompts, often without precise alignment to the decision boundaries. In contrast, gradient-guided white-box methods offer greater control but face inherent limitations due to the discrete nature of token spaces resulting in prompts with weakened semantic fidelity. These attack trends have catalyzed the emergence of prompt-level defense strategies. 
To counter black-box attacks, recent defenses adopt prompt shaping and system-level constraints to guide and restrict the model's response behavior. For gradient-based attacks, defenses typically apply perplexity-based detection and semantic consistency checks to identify suspicious or adversarial outputs.", + "bbox": [ + 71, + 737, + 491, + 941 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "The growing sophistication of defenses reshaped the requirements for evaluation. Static, one-shot rejection mechanisms have proven insufficient in multi-task and multimodal deployments, prompting the development of dynamic strategies such as response rewriting, hierarchical permission control, and consensus-based filtering across multiple models. These strategies demand richer evaluation protocols beyond single metric assessments, shifting toward behavior metrics that capture cross-input consistency, risk under specific task conditions, and adaptability to strategy switching.", + "bbox": [ + 501, + 53, + 921, + 213 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "As the attack-defense interaction intensifies, the evaluation itself has become a critical driver of system evolution. Recent frameworks have introduced automated red teaming pipelines, enabling a closed-loop process where jailbreak samples are continually generated, tested against deployed defenses, and fed back to guide both adversarial strategies and defense refinement. 
This has laid the groundwork for a new paradigm in LLM security research: one where attack, defense, and evaluation are no longer treated in isolation but co-evolve as an interdependent, self-reinforcing system.", + "bbox": [ + 503, + 213, + 921, + 361 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "6.5.5 LLM Deployment Perspective", + "text_level": 1, + "bbox": [ + 504, + 369, + 764, + 385 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "(1) Attack strategies will become more structured and semantically aligned. (i) Black-box attacks may evolve through agent-based optimization, enabling sentence-level jailbreaks with clearer intent and higher success rates. (ii) To overcome the limitations of token-level gradient attacks, future work may focus on generating semantically consistent adversarial prompts that are less detectable by perplexity-based defenses. (iii) Open-source models will serve as surrogates for closed models, allowing attackers to replicate decision boundaries before launching white-box attacks. (iv) Variants from fine-tuning pipelines may leak private information through cross-model comparison, introducing version-aware privacy risks.", + "bbox": [ + 503, + 388, + 921, + 578 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(2) Defenses will shift toward adaptive and transferable mechanisms. (i) Prompt-based defenses will evolve into context-aware controllers that adjust behavior based on input semantics and task context. (ii) Generalizable defenses that work across domains and languages will be critical for scalable deployment. (iii) Future systems may support online updates, enabling continuous refinement in response to new threats.", + "(3) Evaluation will act as both a diagnostic and driving force. (i) Benchmarks must expand beyond text to cover multimodal inputs and tool-based actions. 
(ii) Multi-objective evaluation will replace single-metric scoring, balancing safety and utility through trade-off analysis. (iii) Static test sets will give way to adaptive, streaming benchmarks that evolve with attack trends. (iv) Automated red teaming will close the loop, enabling real-time attack generation, evaluation, and defense adjustment." + ], + "bbox": [ + 503, + 578, + 921, + 825 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "6.5.6 Agent Roadmap", + "text_level": 1, + "bbox": [ + 504, + 837, + 671, + 852 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Agent. The evolution of LLM-based agents originated from role-playing paradigms [801, 937, 938, 939], where researchers investigated organizational structures, role allocation mechanisms, and implementation workflows for task-oriented agents in various social contexts. These systematic explorations not only demonstrated agents' potential in", + "bbox": [ + 501, + 854, + 923, + 944 + ], + "page_idx": 31 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "addressing human societal challenges but also spawned interdisciplinary research programs spanning sociology, organizational theory, and psychology. As the field advanced, research focus shifted toward automated agent workflows [795, 860, 940, 941], domain-specific methods for embodied intelligence, and the development of agent capabilities in tool utilization and memory management. 
Through this progression, agent systems have emerged as a transformative paradigm for automating human social processes, gaining significant recognition as a viable solution for complex societal automation.", + "bbox": [ + 71, + 53, + 491, + 212 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "The rapid advancement of agent capabilities and architectures has brought safety concerns to the forefront of academic and industrial research. These challenges span multiple critical dimensions: tool safety, memory security, and the agent's fundamental operational integrity. Inheriting both the capabilities and vulnerabilities of their underlying LLM foundations, agents intrinsically carry these \"genetic\" weaknesses into more complex operational environments. This inheritance makes safety vulnerabilities particularly acute in agent systems, especially when handling sensitive real-world applications involving personal privacy and financial assets. The development of agent technologies has thus become inextricably linked with safety considerations. Recent years ( $\\sim$ 2023- until now) have witnessed accelerated research in agent safety, focusing on four key frontiers:", + "bbox": [ + 71, + 213, + 491, + 433 + ], + "page_idx": 32 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Agent Brain Security: The core decision-making mechanisms.", + "- Tool Invocation Safety: Secure external API and tool usage.", + "- Memory Retrieval Protection: Robustness against memory poisoning.", + "- Communication Protocol Security: Safe multi-agent interactions." 
+ ], + "bbox": [ + 89, + 436, + 488, + 551 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Emerging work has also begun addressing safety challenges in embodied agent scenarios, marking an important expansion of the research domain.", + "bbox": [ + 71, + 556, + 491, + 599 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "6.5.7 Perspective", + "text_level": 1, + "bbox": [ + 73, + 616, + 210, + 631 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "We outline potential future research directions for agent systems and analyze their developmental trajectory:", + "bbox": [ + 71, + 635, + 488, + 664 + ], + "page_idx": 32 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) Safety of External Agent Modules. Unlike standalone LLMs, agents interact with external modules (e.g., tools, memory), which are exposed to open environments and thus more vulnerable to attacks. Key research challenges include: (i) Tool Safety: Secure tool invocation and API usage to prevent adversarial exploitation. (ii) Memory Protection: Robustness against memory poisoning and unauthorized access, to name just a few. These external interfaces introduce unique attack surfaces, making their security a critical research priority.", + "(2) Stability and Reliability of Dynamically Updated Agents via Reinforcement Learning: As reinforcement learning (RL) [35, 942, 943] techniques become increasingly integrated with LLM-based agents, these systems are being deployed in more complex and dynamic environments. While this integration enhances agents' adaptability and intelligence, it also introduces significant risks: (i) Emergent Threats: Advanced RL capabilities may inadvertently enable agents to learn and propagate harmful behaviors or danger-" + ], + "bbox": [ + 71, + 665, + 491, + 944 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "ous information. 
(ii) Dynamic Vulnerability: Continuous online learning increases exposure to adversarial perturbations or reward hacking.", + "bbox": [ + 501, + 53, + 921, + 97 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Critical Research Directions: (i) Safe RL Frameworks: Developing constrained optimization methods to bound agent behavior within ethical and operational guardrails. (ii) Stability-Aware Updates: Designing update protocols that balance adaptability with robustness (e.g., catastrophic forgetting mitigation). (iii) Anomaly Detection: Real-time monitoring of learning trajectories to identify and neutralize hazardous knowledge acquisition.", + "bbox": [ + 501, + 97, + 921, + 213 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "(3) Safety of Embodied Agents in Domain-Specific Scenarios: As autonomous agents become increasingly deployed across specialized domains, their safety considerations must account for unique domain-specific vulnerabilities. We list some key challenges as follows:", + "bbox": [ + 503, + 213, + 921, + 286 + ], + "page_idx": 32 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Web Agents:", + "- HTML/JS injection risks during automated browsing", + "- Secure sandboxing requirements for DOM manipulation", + "- Cross-site scripting (XSS) vulnerabilities in automated form-filling" + ], + "bbox": [ + 521, + 289, + 921, + 378 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "- Communication Agents:", + "bbox": [ + 522, + 381, + 714, + 395 + ], + "page_idx": 32 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Protocol-level attacks (e.g., SIP flooding, WebRTC exploits)", + "- End-to-end encryption requirements for sensitive dialogues", + "- Authentication bypass in voice-based agents" + ], + "bbox": [ + 535, + 397, + 921, + 470 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Robotics Control Agents:", + "bbox": [ + 522, + 474, + 720, + 488 + ], + "page_idx": 32 + }, 
+ { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Physical safety constraints in actuator commands", + "Real-time collision avoidance verification", + "- Emergency stop mechanism reliability" + ], + "bbox": [ + 535, + 489, + 895, + 534 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Healthcare Agents:", + "bbox": [ + 522, + 537, + 676, + 551 + ], + "page_idx": 32 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Medical decision audit trail requirements", + "- Drug interaction verification systems" + ], + "bbox": [ + 535, + 553, + 841, + 583 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "7 SAFETY IN LLM-BASED APPLICATION", + "text_level": 1, + "bbox": [ + 504, + 602, + 846, + 616 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "In this section, we focus on the security considerations that should be addressed following the commercialization of LLMs into practical applications. With the rapid development of LLMs in fields such as content creation, intelligent interaction, automated programming, medical diagnosis, and financial analysis, LLM-based applications are reshaping industry workflows and business models [944]. However, while LLMs significantly enhance productivity and facilitate human-machine collaboration, their large-scale deployment has also introduced severe security challenges [66]. Ensuring the security, reliability, and compliance of LLM-based applications has become a critical issue in AI research and real-world implementation.", + "bbox": [ + 501, + 621, + 921, + 811 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Truthfulness. Despite their powerful text generation capabilities, LLMs exhibit hallucination phenomena, generating inaccurate, misleading, or entirely fictitious content [945, 946, 947, 948, 949]. Unlike traditional errors, hallucinations are often subtle and linguistically plausible, making them especially dangerous in real-world applications. 
This challenge is exacerbated in high-stakes domains such as healthcare, law, and finance, where misleading AI-generated information can directly affect human safety and economic", + "bbox": [ + 501, + 811, + 921, + 941 + ], + "page_idx": 32 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/792d0e5c90e63607687a3b7c2093f939694dd3631a5e08614fac6eb7112e1843.jpg", + "image_caption": [ + "Fig. 14: We illustrate the diverse applications of AI in enterprise productivity, content generation, programming, healthcare, finance, customer support, education, and cyber-security. We also highlight critical issues related to truthfulness and privacy, including data leakage, security threats, property rights, fairness, and regulatory compliance, underscoring the need for robust safeguards in AI deployment" + ], + "image_footnote": [], + "bbox": [ + 73, + 55, + 921, + 501 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "stability. For example, an LLM-powered clinical assistant may suggest nonexistent diseases or cite unverified treatments, posing risks to patients [739, 950], while financial advisors powered by LLMs might generate persuasive but flawed market forecasts, leading to significant capital misallocation or systemic financial vulnerabilities [951]. Specifically, hallucination is not merely a surface-level output flaw but a systemic artifact rooted in the model's training dynamics and the nature of its data. 
Specifically, hallucination can stem from three compounding factors: (1) semantic overgeneralization due to exposure to noisy, unverified, or synthetic pretraining corpora; (2) objective misalignment, where maximum-likelihood or reinforcement-based training prioritizes coherence and helpfulness over factual accuracy; and (3) latent distribution shifts between pretraining and deployment-time inputs, particularly under long-tail or adversarial queries [952, 953]. These factors jointly reinforce spurious correlations and amplify unsupported generations, even in otherwise well-aligned models. In sum, hallucination represents a critical bottleneck for the reliable deployment of LLMs. Its mitigation is foundational not only for improving user trust but also for enabling the safe integration of LLMs into high-stakes decision-making", + "bbox": [ + 71, + 594, + 493, + 931 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "workflows.", + "bbox": [ + 504, + 594, + 586, + 608 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Privacy. Data privacy concerns [954] represent another significant challenge in LLM deployment [821, 955]. Training these models requires vast amounts of text data, which may include personal information, corporate secrets, and medical records [956]. If an LLM inadvertently leaks sensitive training data or lacks robust access control mechanisms, users' private information could be exploited or misused. In corporate settings, LLMs may unintentionally expose confidential documents or sensitive customer data, leading to severe compliance and legal risks. Moreover, inference-time attacks [957], such as membership inference and model extraction, can further expose sensitive data by allowing adversaries to infer training set membership or replicate model behavior. 
Therefore, LLM-based applications must incorporate data protection measures and privacy-preserving techniques like differential privacy and query rate limiting to mitigate information leakage risks.", + "bbox": [ + 503, + 614, + 923, + 864 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Robustness. Prompt injection [543] and jailbreak [636] risks pose additional security threats. Attackers can craft adversarial prompts to bypass security restrictions, causing the model to generate harmful or unauthorized content. For example, in chatbot systems, malicious users could manip", + "bbox": [ + 503, + 868, + 921, + 944 + ], + "page_idx": 33 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "ulate LLMs to generate hate speech, disinformation, or even harmful instructions. Similarly, in AI-powered coding assistants such as GitHub Copilot, attackers may exploit LLMs to produce code with security vulnerabilities, potentially serving as backdoors for future cyberattacks. Developing robust security defenses to prevent LLMs from being misused in real-world applications is crucial for AI safety.", + "bbox": [ + 71, + 53, + 491, + 155 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Copyright. Another pressing concern is intellectual property and copyright protection [958, 959, 960]. LLMs are trained on vast datasets that often include copyrighted texts, source code, and artistic works, raising potential infringement risks. When generating content, LLMs may inadvertently replicate or closely mimic copyrighted material, leading to legal disputes. 
For instance, AI-powered writing tools might generate articles resembling published works, while coding assistants could produce open-source code snippets without proper licensing [961]. This not only raises concerns about content originality but also introduces legal and ethical dilemmas. Addressing these challenges requires watermarking [962, 963], provenance tracking, and clear copyright attribution mechanisms to ensure responsible AI-generated content management [178].", + "bbox": [ + 76, + 156, + 491, + 375 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Ethical and Social Responsibility. Beyond technical concerns, ethical and social responsibility are also critical factors in large-scale LLM deployment. Due to biases in training data, LLMs may generate content that reinforces stereotypes, gender discrimination, or racial biases [964, 965]. In sectors such as hiring, finance, and healthcare, biased AI-generated recommendations could exacerbate existing inequalities and lead to unfair decision-making. Moreover, as LLMs become increasingly integrated into virtual assistants, social media, and news distribution platforms, concerns over AI-generated misinformation, transparency, and accountability are growing. Building fair, transparent, and trustworthy AI governance frameworks is thus essential to mitigating AI-induced social risks.", + "bbox": [ + 76, + 375, + 491, + 577 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Governance. As governments worldwide strengthen AI regulations, LLM-related legal and compliance requirements are evolving rapidly. The EU AI Act classifies LLMs as high-risk AI systems, requiring developers to provide transparency reports and risk control mechanisms [966]. China's Generative AI Regulations mandate AI-generated content to align with ethical standards and undergo governmental scrutiny [967]. 
In the United States, regulatory discussions emphasize AI transparency and data privacy protections, urging businesses to establish responsible AI practices [968]. These policy developments indicate that LLM-based applications must comply with regional regulations while maintaining a balance between compliance and innovation.", + "bbox": [ + 71, + 578, + 491, + 781 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "In summary, while LLM-based applications drive technological progress, they also introduce multifaceted challenges related to misinformation, data privacy, adversarial manipulation, copyright infringement, ethical concerns, and regulatory compliance (refer to Figure 14). These issues not only impact the trustworthiness and legality of AI technologies but also have far-reaching implications for social trust, legal accountability, and business sustainability. Addressing these challenges necessitates a comprehensive approach that integrates privacy protection, content governance, copyright management, ethical safeguards, and regulatory compli", + "bbox": [ + 71, + 782, + 491, + 944 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "ance, alongside collaborative efforts from both academia and industry.", + "bbox": [ + 503, + 53, + 921, + 83 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "8 POTENTIAL RESEARCH DIRECTIONS", + "text_level": 1, + "bbox": [ + 504, + 104, + 831, + 119 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Through a systematic and comprehensive examination of safety across the entire lifecycle of LLMs, we have identified valuable insights for future research:", + "bbox": [ + 503, + 125, + 923, + 167 + ], + "page_idx": 34 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Data generation holds immense potential, particularly in ensuring the safety of generated data and automating the data generation process, which is crucial for reliable and robust model training. 
Reliable data generation is fundamental to the integrity of model training.", + "$\\star$ Post-training phases are becoming increasingly critical. Ensuring secure fine-tuning and alignment of data is a key future direction, closely intertwined with data generation. As concepts proliferate, multi-objective alignment may emerge as a significant area of focus.", + "$\\star$ Model editing and unlearning safety are paramount for efficient model updates and deployment. Current learning efficiencies are suboptimal, and advancements in these technologies could revolutionize how models acquire new knowledge, enabling continuous and efficient learning (potentially even localized memory learning). These techniques might surpass traditional SGD algorithms, but safety measures are essential to prevent models from devolving into malicious entities that contradict human intentions.", + "$\\star$ LLM agents, in the final deployment stage, require robust safety assurances. Ensuring the security of agent tools and agent memory, as well as addressing safety in embodied intelligence scenarios such as web agents and computer agents, are critical areas for further investigation." + ], + "bbox": [ + 516, + 170, + 921, + 551 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "9 CONCLUSION", + "text_level": 1, + "bbox": [ + 504, + 571, + 648, + 585 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "In this survey, we provide a comprehensive analysis of the safety concerns across the entire lifecycle of LLMs, from data preparation and pre-training to post-training, deployment, and commercialization. 
By introducing the concept of \"fullstack\" safety, we offer an integrated view of the security and safety issues faced by LLMs throughout their development and usage, which addresses gaps in the existing literature that typically focus on specific stages of the lifecycle.", + "bbox": [ + 501, + 592, + 921, + 709 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Through an exhaustive review of over $900+$ papers, we systematically examined and organized the safety issues spanning key stages of LLM production, deployment, and use, including data generation, alignment techniques, model editing, and LLM-based agent systems and LLM-based applications. Our findings highlight the critical vulnerabilities at each stage, such as privacy risks, toxic data, harmful fine-tuning attacks, and deployment challenges. The safety of LLMs is a multifaceted issue requiring careful attention to data integrity, model alignment, and post-deployment security measures. Moreover, we propose promising directions for future research, including improvements in data safety, alignment techniques, and defense mechanisms for LLM-based agents. This work is vital for guiding future efforts to make LLMs safer and more reliable, especially as they become increasingly integral to various industries", + "bbox": [ + 501, + 709, + 923, + 944 + ], + "page_idx": 34 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": "35", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "and applications. 
Ensuring robust security across the entire LLM lifecycle is crucial for their responsible and effective deployment in real-world scenarios.", + "bbox": [ + 71, + 53, + 491, + 98 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 73, + 128, + 189, + 143 + ], + "page_idx": 35 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] L. Ouyang, J. Wu, X. Jiang, D. Almeida, C. Wainwright, P. Mishkin, C. Zhang, S. Agarwal, K. Slama, A. Ray et al., \"Training language models to follow instructions with human feedback,\" Advances in neural information processing systems, vol. 35, pp. 27730-27744, 2022.", + "[2] H. Touvron, T. Lavril, G. Izacard, X. Martinet, M.-A. Lachaux, T. Lacroix, B. Rozière, N. Goyal, E. Hambro, F. Azhar et al., \"Llama: Open and efficient foundation language models,\" arXiv preprint arXiv:2302.13971, 2023.", + "[3] J. Bai, S. Bai, Y. Chu, Z. Cui, K. Dang, X. Deng, Y. Fan, W. Ge, Y. Han, F. Huang et al., \"Qwen technical report,\" arXiv preprint arXiv:2309.16609, 2023.", + "[4] A. Liu, B. Feng, B. Xue, B. Wang, B. Wu, C. Lu, C. Zhao, C. Deng, C. Zhang, C. Ruan et al., \"Deepseek-v3 technical report,\" arXiv preprint arXiv:2412.19437, 2024.", + "[5] D. Guo, D. Yang, H. Zhang, J. Song, R. Zhang, R. Xu, Q. Zhu, S. Ma, P. Wang, X. Bi et al., \"Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning,\" arXiv preprint arXiv:2501.12948, 2025.", + "[6] W. X. Zhao, K. Zhou, J. Li, T. Tang, X. Wang, Y. Hou, Y. Min, B. Zhang, J. Zhang, Z. Dong et al., \"A survey of large language models,\" arXiv preprint arXiv:2303.18223, vol. 1, no. 2, 2023.", + "[7] Y. Chang, X. Wang, J. Wang, Y. Wu, L. Yang, K. Zhu, H. Chen, X. Yi, C. Wang, Y. Wang et al., \"A survey on evaluation of large language models,\" ACM transactions on intelligent systems and technology, vol. 15, no. 3, pp. 1-45, 2024.", + "[8] M. U. Hadi, R. Qureshi, A. Shah, M. Irfan, A. Zafar, M. B. Shaikh, N. 
Akhtar, J. Wu, S. Mirjalili et al., \"A survey on large language models: Applications, challenges, limitations, and practical usage,\" Authorea Preprints, vol. 3, 2023.", + "[9] Y. Yan, S. Wang, J. Huo, J. Ye, Z. Chu, X. Hu, P. S. Yu, C. Gomes, B. Selman, and Q. Wen, \"Position: Multimodal large language models can significantly advance scientific reasoning,\" arXiv preprint arXiv:2502.02871, 2025.", + "[10] Y. Yan, J. Su, J. He, F. Fu, X. Zheng, Y. Lyu, K. Wang, S. Wang, Q. Wen, and X. Hu, “A survey of mathematical reasoning in the era of multimodal large language model: Benchmark, method & challenges,” arXiv preprint arXiv:2412.11936, 2024.", + "[11] X. Zou, Y. Yan, X. Hao, Y. Hu, H. Wen, E. Liu, J. Zhang, Y. Li, T. Li, Y. Zheng et al., \"Deep learning for cross-domain data fusion in urban computing: Taxonomy, advances, and outlook,\" Information Fusion, vol. 113, p. 102606, 2025.", + "[12] Y. Li, X. Zhang, L. Luo, H. Chang, Y. Ren, I. King, and J. Li, “G-refer: Graph retrieval-augmented large" + ], + "bbox": [ + 73, + 154, + 491, + 943 + ], + "page_idx": 35 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "language model for explainable recommendation,\" arXiv preprint arXiv:2502.12586, 2025.", + "[13] S. Sun, R. Liu, J. Lyu, J.-W. Yang, L. Zhang, and X. Li, \"A large language model-driven reward design framework via dynamic feedback for reinforcement learning,\" arXiv preprint arXiv:2410.14660, 2024.", + "[14] S. Sonko, A. O. Adewusi, O. C. Obi, S. Onwusinkwue, and A. Atadoga, “A critical review towards artificial general intelligence: Challenges, ethical considerations, and the path forward,” World Journal of Advanced Research and Reviews, vol. 21, no. 3, pp. 1262-1268, 2024.", + "[15] S. McLean, G. J. Read, J. Thompson, C. Baber, N. A. Stanton, and P. M. Salmon, \"The risks associated with artificial general intelligence: A systematic review,\" Journal of Experimental & Theoretical Artificial Intelligence, vol. 35, no. 5, pp. 
649-663, 2023.", + "[16] R. Liu, J. Gao, J. Zhao, K. Zhang, X. Li, B. Qi, W. Ouyang, and B. Zhou, \"Can 1b llm surpass 405b llm? rethinking compute-optimal test-time scaling,\" arXiv preprint arXiv:2502.06703, 2025.", + "[17] J. Ruan, Y. Chen, B. Zhang, Z. Xu, T. Bao, H. Mao, Z. Li, X. Zeng, R. Zhao et al., \"Tptu: Task planning and tool usage of large language model-based ai agents,\" in NeurIPS 2023 Foundation Models for Decision Making Workshop, 2023.", + "[18] V. Sorin, E. Klang, M. Sklair-Levy, I. Cohen, D. B. Zippel, N. Balint Lahat, E. Konen, and Y. Barash, \"Large language model (chatgpt) as a support tool for breast tumor board,\" NPJ Breast Cancer, vol. 9, no. 1, p. 44, 2023.", + "[19] R. Yang, L. Song, Y. Li, S. Zhao, Y. Ge, X. Li, and Y. Shan, \"Gpt4tools: Teaching large language model to use tools via self-instruction,\" Advances in Neural Information Processing Systems, vol. 36, pp. 71-995-72007, 2023.", + "[20] T. Schick, J. Dwivedi-Yu, R. Dessi, R. Raileanu, M. Lomeli, E. Hambro, L. Zettlemoyer, N. Cancedda, and T. Scialom, \"Toolformer: Language models can teach themselves to use tools,\" Advances in Neural Information Processing Systems, vol. 36, pp. 68-59-68-551, 2023.", + "[21] W. Zhong, L. Guo, Q. Gao, H. Ye, and Y. Wang, \"Memorybank: Enhancing large language models with long-term memory,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 17, 2024, pp. 19724-19731.", + "[22] W. Wang, L. Dong, H. Cheng, X. Liu, X. Yan, J. Gao, and F. Wei, \"Augmenting language models with long-term memory,\" Advances in Neural Information Processing Systems, vol. 36, pp. 74530-74543, 2023.", + "[23] Z. Zhang, X. Bo, C. Ma, R. Li, X. Chen, Q. Dai, J. Zhu, Z. Dong, and J.-R. Wen, \"A survey on the memory mechanism of large language model based agents,\" arXiv preprint arXiv:2404.13501, 2024.", + "[24] J. Huo, Y. Yan, B. Hu, Y. Yue, and X. 
Hu, \"Mmneuron: Discovering neuron-level domain-specific interpretation in multimodal large language model,\" arXiv preprint arXiv:2406.11193, 2024.", + "[25] W. Liu, X. Huang, X. Zeng, X. Hao, S. Yu, D. Li, S. Wang, W. Gan, Z. Liu, Y. Yu et al., \"Toolace: Win" + ], + "bbox": [ + 506, + 53, + 921, + 943 + ], + "page_idx": 35 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 35 + }, + { + "type": "page_number", + "text": "36", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 35 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ning the points of llm function calling,\" arXiv preprint arXiv:2409.00920, 2024.", + "[26] Q. Tang, Z. Deng, H. Lin, X. Han, Q. Liang, B. Cao, and L. Sun, \"Toolalpaca: Generalized tool learning for language models with 3000 simulated cases,\" arXiv preprint arXiv:2306.05301, 2023.", + "[27] T. Guo, X. Chen, Y. Wang, R. Chang, S. Pei, N. V. Chawla, O. Wiest, and X. Zhang, \"Large language model based multi-agents: A survey of progress and challenges,\" arXiv preprint arXiv:2402.01680, 2024.", + "[28] L. Wang, C. Ma, X. Feng, Z. Zhang, H. Yang, J. Zhang, Z. Chen, J. Tang, X. Chen, Y. Lin et al., \"A survey on large language model based autonomous agents,\" Frontiers of Computer Science, vol. 18, no. 6, p. 186345, 2024.", + "[29] Z. Xi, W. Chen, X. Guo, W. He, Y. Ding, B. Hong, M. Zhang, J. Wang, S. Jin, E. Zhou et al., \"The rise and potential of large language model based agents: A survey,\" Science China Information Sciences, vol. 68, no. 2, p. 121101, 2025.", + "[30] Y. Yan and J. Lee, \"Georeasoner: Reasoning on geospatially grounded context for natural language understanding,\" in Proceedings of the 33rd ACM International Conference on Information and Knowledge Management, 2024, pp. 4163-4167.", + "[31] A. Majumdar, K. Yadav, S. Arnaud, J. Ma, C. Chen, S. Silwal, A. Jain, V-P. Berges, T. Wu, J. 
Vakil et al., \"Where are we in the search for an artificial visual cortex for embodied intelligence?\" Advances in Neural Information Processing Systems, vol. 36, pp. 655-677, 2023.", + "[32] M. Zhou, H. Dong, H. Song, N. Zheng, W.-H. Chen, and H. Wang, \"Embodied intelligence-based perception, decision-making, and control for autonomous operations of rail transportation,\" IEEE Transactions on Intelligent Vehicles, 2024.", + "[33] X. Ma, Y. Gao, Y. Wang, R. Wang, X. Wang, Y. Sun, Y. Ding, H. Xu, Y. Chen, Y. Zhao et al., \"Safety at scale: A comprehensive survey of large model safety,\" arXiv preprint arXiv:2502.05206, 2025.", + "[34] K. Kumar, T. Ashraf, O. Thawakar, R. M. Anwer, H. Cholakkal, M. Shah, M.-H. Yang, P. H. Torr, S. Khan, and F. S. Khan, \"Llm post-training: A deep dive into reasoning large language models,\" arXiv preprint arXiv:2502.21321, 2025.", + "[35] Z.-Z. Li, D. Zhang, M.-L. Zhang, J. Zhang, Z. Liu, Y. Yao, H. Xu, J. Zheng, P.-J. Wang, X. Chen et al., \"From system 1 to system 2: A survey of reasoning large language models,\" arXiv preprint arXiv:2502.17419, 2025.", + "[36] Y. Chen, W. Sun, C. Fang, Z. Chen, Y. Ge, T. Han, Q. Zhang, Y. Liu, Z. Chen, and B. Xu, \"Security of language models for code: A systematic literature review,\" ACM Transactions on Software Engineering and Methodology, vol. 1, no. 1, pp. 1-66, 2025.", + "[37] W. Qu, Y. Zhou, Y. Wu, T. Xiao, B. Yuan, Y. Li, and J. Zhang, \"Prompt inversion attack against collaborative inference of large language models,\" in IEEE S&P, 2025.", + "[38] J. Wu, S. Yang, R. Zhan, Y. Yuan, L. S. Chao, and D. F. Wong, \"A survey on llm-generated text detection: Ne" + ], + "bbox": [ + 75, + 53, + 491, + 943 + ], + "page_idx": 36 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "cessity, methods, and future directions,\" Computational Linguistics, pp. 1-66, 2025.", + "[39] H. Wang, J. Li, H. Wu, E. Hovy, and Y. 
Sun, \"Pre-trained language models and their applications,\" *Engineering*, vol. 25, pp. 51-65, 2023.", + "[40] C. Zhou, Q. Li, C. Li, J. Yu, Y. Liu, G. Wang, K. Zhang, C. Ji, Q. Yan, L. He et al., \"A comprehensive survey on pretrained foundation models: A history from bert to chatgpt,\" International Journal of Machine Learning and Cybernetics, pp. 1-65, 2024.", + "[41] X. Zhang, X. Zhu, and L. Lessard, \"Online data poisoning attacks,\" in Learning for Dynamics and Control. PMLR, 2020, pp. 201-210.", + "[42] M. Goldblum, D. Tsipras, C. Xie, X. Chen, A. Schwarzschild, D. Song, A. Madry, B. Li, and T. Goldstein, \"Dataset security for machine learning: Data poisoning, backdoor attacks, and defenses,\" IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 45, no. 2, pp. 1563-1580, 2022.", + "[43] N. Lukas, A. Salem, R. Sim, S. Tople, L. Wutschitz, and S. Zanella-Béguelin, \"Analyzing leakage of personally identifiable information in language models,\" in 2023 IEEE Symposium on Security and Privacy (SP). IEEE, 2023, pp. 346-363.", + "[44] W. Sun, Y. Chen, C. Fang, Y. Feng, Y. Xiao, A. Guo, Q. Zhang, Y. Liu, B. Xu, and Z. Chen, \"Eliminating backdoors in neural code models for secure code understanding,\" in Proceedings of the 33rd ACM International Conference on the Foundations of Software Engineering. Trondheim, Norway: ACM, Mon 23 - Fri 27 June 2025, pp. 1-23.", + "[45] H. R. Kirk, B. Vidgen, P. Röttger, and S. A. Hale, \"The benefits, risks and bounds of personalizing the alignment of large language models to individuals,\" Nature Machine Intelligence, vol. 6, no. 4, pp. 383-392, 2024.", + "[46] Z. Zhou, H. Yu, X. Zhang, R. Xu, F. Huang, and Y. Li, \"How alignment and jailbreak work: Explain llm safety through intermediate hidden states,\" in Findings of the Association for Computational Linguistics: EMNLP 2024, 2024, pp. 2461-2488.", + "[47] X. Qi, Y. Zeng, T. Xie, P.-Y. Chen, R. Jia, P. Mittal, and P. 
Henderson, \"Fine-tuning aligned language models compromises safety, even when users do not intend to!\" in ICLR, 2024. [Online]. Available: https://openreview.net/forum?id=hTEGyKf0dZ", + "[48] X. Qi, A. Panda, K. Lyu, X. Ma, S. Roy, A. Beirami, P. Mittal, and P. Henderson, \"Safety alignment should be made more than just a few tokens deep,\" in The Thirteen International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=6Mxhg9PtDE", + "[49] D. Halawi, A. Wei, E. Wallace, T. T. Wang, N. Hagh-talab, and J. Steinhardt, \"Covert malicious finetuning: Challenges in safeguarding LLM adaptation,\" in Proceedings of the 41st International Conference on Machine Learning. PMLR, 2024, pp. 17298-17312.", + "[50] W. Hawkins, B. Mittelstadt, and C. Russell, \"The effect of fine-tuning on language model toxicity,\" in Neurips Safe Generative AI Workshop 2024, 2024.", + "[51] J. Huang and J. Zhang, \"A survey on evaluation of" + ], + "bbox": [ + 506, + 53, + 921, + 943 + ], + "page_idx": 36 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 36 + }, + { + "type": "page_number", + "text": "37", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 36 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "multimodal large language models,\" arXiv preprint arXiv:2408.15769, 2024.", + "[52] P. Röttger, F. Pernisi, B. Vidgen, and D. Hovy, \"Safetyprompts: a systematic review of open datasets for evaluating and improving large language model safety,\" arXiv preprint arXiv:2404.05399, 2024.", + "[53] Y. Dong, R. Mu, Y. Zhang, S. Sun, T. Zhang, C. Wu, G. Jin, Y. Qi, J. Hu, J. Meng et al., \"Safeguarding large language models: A survey,\" arXiv preprint arXiv:2406.02622, 2024.", + "[54] Y. Wang, Y. Pan, Q. Zhao, Y. Deng, Z. Su, L. Du, and T. H. 
Luan, \"Large model agents: State-of-the-art, cooperation paradigms, security and privacy, and future trends,\" arXiv preprint arXiv:2409.14457, 2024.", + "[55] G. Zhang, K. Chen, G. Wan, H. Chang, H. Cheng, K. Wang, S. Hu, and L. Bai, \"Evoflow: Evolving diverse agentic workflows on the fly,\" arXiv preprint arXiv:2502.07373, 2025.", + "[56] G. Zhang, L. Niu, J. Fang, K. Wang, L. Bai, and X. Wang, \"Multi-agent architecture search via agentic supernet,\" arXiv preprint arXiv:2502.04180, 2025.", + "[57] G. Zhang, Y. Yue, Z. Li, S. Yun, G. Wan, K. Wang, D. Cheng, J. X. Yu, and T. Chen, \"Cut the crap: An economical communication pipeline for llm-based multi-agent systems,\" arXiv preprint arXiv:2410.02506, 2024.", + "[58] Y. Yue, G. Zhang, B. Liu, G. Wan, K. Wang, D. Cheng, and Y. Qi, \"Masrouter: Learning to route llms for multi-agent systems,\" 2025. [Online]. Available: https://arxiv.org/abs/2502.11133", + "[59] Z. Liang, Y. Xu, Y. Hong, P. Shang, Q. Wang, Q. Fu, and K. Liu, \"A survey of multimodel large language models,\" in Proceedings of the 3rd International Conference on Computer, Artificial Intelligence and Control Engineering, 2024, pp. 405-409.", + "[60] S. Zhang, L. Dong, X. Li, S. Zhang, X. Sun, S. Wang, J. Li, R. Hu, T. Zhang, F. Wu et al., \"Instruction tuning for large language models: A survey,\" arXiv preprint arXiv:2308.10792, 2023.", + "[61] H. Zhao, H. Chen, F. Yang, N. Liu, H. Deng, H. Cai, S. Wang, D. Yin, and M. Du, \"Explainability for large language models: A survey,\" ACM Transactions on Intelligent Systems and Technology, vol. 15, no. 2, pp. 1-38, 2024.", + "[62] T. Shen, R. Jin, Y. Huang, C. Liu, W. Dong, Z. Guo, X. Wu, Y. Liu, and D. Xiong, \"Large language model alignment: A survey,\" arXiv preprint arXiv:2309.15025, 2023.", + "[63] M. A. K. Raiaan, M. S. H. Mukta, K. Fatema, N. M. Fahad, S. Sakib, M. M. J. Mim, J. Ahmad, M. E. Ali, and S. 
Azam, \"A review on large language models: Architectures, applications, taxonomies, open issues and challenges,\" IEEE access, vol. 12, pp. 26839-26874, 2024.", + "[64] K. S. Kalyan, \"A survey of gpt-3 family large language models including chatgpt and gpt-4,\" Natural Language Processing Journal, vol. 6, p. 100048, 2024.", + "[65] E. Shayegani, M. A. A. Mamun, Y. Fu, P. Zaree, Y. Dong, and N. Abu-Ghazaleh, \"Survey of vulnerabilities in large language models revealed by adversarial attacks,\" arXiv preprint arXiv:2310.10844, 2023." + ], + "bbox": [ + 75, + 53, + 491, + 941 + ], + "page_idx": 37 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[66] Y. Yao, J. Duan, K. Xu, Y. Cai, Z. Sun, and Y. Zhang, \"A survey on large language model (llm) security and privacy: The good, the bad, and the ugly,\" High-Confidence Computing, p. 100211, 2024.", + "[67] L. Qin, Q. Chen, Y. Zhou, Z. Chen, Y. Li, L. Liao, M. Li, W. Che, and P. S. Yu, \"Multilingual large language model: A survey of resources, taxonomy and frontiers,\" arXiv preprint arXiv:2404.04925, 2024.", + "[68] M. U. Hadi, R. Qureshi, A. Shah, M. Irfan, A. Zafar, M. B. Shaikh, N. Akhtar, J. Wu, S. Mirjalili et al., \"Large language models: a comprehensive survey of its applications, challenges, limitations, and future prospects,\" Authorea Preprints, vol. 1, pp. 1-26, 2023.", + "[69] L. Sun, Y. Huang, H. Wang, S. Wu, Q. Zhang, C. Gao, Y. Huang, W. Lyu, Y. Zhang, X. Li et al., \"Trustllm: Trustworthiness in large language models,\" arXiv preprint arXiv:2401.05561, vol. 3, 2024.", + "[70] B. C. Das, M. H. Amini, and Y. Wu, \"Security and privacy challenges of large language models: A survey,\" ACM Computing Surveys, vol. 57, no. 6, pp. 1-39, 2025.", + "[71] F. He, T. Zhu, D. Ye, B. Liu, W. Zhou, and P. S. Yu, \"The emerged security and privacy of llm agent: A survey with case studies,\" arXiv preprint arXiv:2407.19354, 2024.", + "[72] G. Tie, Z. Zhao, D. Song, F. Wei, R. Zhou, Y. Dai, W. 
Yin, Z. Yang, J. Yan, Y. Su et al., \"A survey on post-training of large language models,\" arXiv preprint arXiv:2503.06072, 2025.", + "[73] Y. Huang, C. Gao, S. Wu, H. Wang, X. Wang, Y. Zhou, Y. Wang, J. Ye, J. Shi, Q. Zhang et al., \"On the trustworthiness of generative foundation models: Guideline, assessment, and perspective,\" arXiv preprint arXiv:2502.14296, 2025.", + "[74] M. Yu, F. Meng, X. Zhou, S. Wang, J. Mao, L. Pang, T. Chen, K. Wang, X. Li, Y. Zhang et al., \"A survey on trustworthy llm agents: Threats and countermeasures,\" arXiv preprint arXiv:2503.09648, 2025.", + "[75] X. Ma, Y. Gao, Y. Wang, R. Wang, X. Wang, Y. Sun, Y. Ding, H. Xu, Y. Chen, Y. Zhao, H. Huang, Y. Li, J. Zhang, X. Zheng, Y. Bai, Z. Wu, X. Qiu, J. Zhang, Y. Li, J. Sun, C. Wang, J. Gu, B. Wu, S. Chen, T. Zhang, Y. Liu, M. Gong, T. Liu, S. Pan, C. Xie, T. Pang, Y. Dong, R. Jia, Y. Zhang, S. Ma, X. Zhang, N. Gong, C. Xiao, S. Erfani, B. Li, M. Sugiyama, D. Tao, J. Bailey, and Y.-G. Jiang, \"Safety at scale: A comprehensive survey of large model safety,\" 2025. [Online]. Available: https://arxiv.org/abs/2502.05206", + "[76] Y. Huang, L. Sun, H. Wang, S. Wu, Q. Zhang, Y. Li, C. Gao, Y. Huang, W. Lyu, Y. Zhang et al., \"Position: Trustllm: Trustworthiness in large language models,\" in International Conference on Machine Learning. PMLR, 2024, pp. 20166-20270.", + "[77] Z. Dong, Z. Zhou, C. Yang, J. Shao, and Y. Qiao, \"Attacks, defenses and evaluations for llm conversation safety: A survey,\" arXiv preprint arXiv:2402.09283, 2024.", + "[78] G. Penedo, Q. Malartic, D. Hesslow, R. Cojocaru, A. Cappelli, H. Alobeidli, B. Pannier, E. Almazrouei, and J. Launay, \"The refined web dataset for falcon llm: outperforming curated corpora with web data, and web data only,\" arXiv preprint arXiv:2306.01116, 2023." + ], + "bbox": [ + 506, + 53, + 921, + 941 + ], + "page_idx": 37 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 37 + }, + { + "type": "page_number", + "text": "38", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 37 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[79] L. Soldaini, R. Kinney, A. Bhagia, D. Schwenk, D. Atkinson, R. Authur, B. Bogin, K. Chandu, J. Dumas, Y. Elazar et al., \"Dolma: An open corpus of three trillion tokens for language model pretraining research,\" arXiv preprint arXiv:2402.00159, 2024.", + "[80] J. Kaddour, J. Harris, M. Mozes, H. Bradley, R. Raileanu, and R. McHardy, \"Challenges and applications of large language models,\" arXiv preprint arXiv:2307.10169, 2023.", + "[81] W. Sun, Y. Chen, G. Tao, C. Fang, X. Zhang, Q. Zhang, and B. Luo, \"Backdooring neural code search,\" in Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics. Toronto, Canada: Association for Computational Linguistics, July 9-14 2023, pp. 9692-9708.", + "[82] W. Sun, Y. Chen, M. Yuan, C. Fan, Z. Chen, C. Wang, Y. Liu, B. Xu, and Z. Chen, \"Show me your code! kill code poisoning: A lightweight method based on code naturalness,\" in Proceedings of the IEEE/ACM 47th International Conference on Software Engineering. Ottawa, Ontario, Canada: IEEE Computer Society, Sun 27 April - Sat 3 May 2025, pp. 1-13.", + "[83] N. Carlini, M. Jagielski, C. A. Choquette-Choo, D. Paleka, W. Pearce, H. Anderson, A. Terzis, K. Thomas, and F. Tramèr, \"Poisoning web-scale training datasets is practical,\" in 2024 IEEE Symposium on Security and Privacy (SP). IEEE, 2024, pp. 407-425.", + "[84] Y. Zhang, J. Rando, I. Evtimov, J. Chi, E. M. Smith, N. Carlini, F. Tramér, and D. Ippolito, \"Persistent pre-training poisoning of llms,\" arXiv preprint arXiv:2410.13722, 2024.", + "[85] E. Wallace, T. Z. Zhao, S. Feng, and S. Singh, \"Concealed data poisoning attacks on nlp models,\" arXiv preprint arXiv:2010.12563, 2020.", + "[86] B. Yan, K. Li, M. Xu, Y. Dong, Y. 
Zhang, Z. Ren, and X. Cheng, \"On protecting the data privacy of large language models (llms): A survey,\" arXiv preprint arXiv:2403.05156, 2024.", + "[87] N. Kandpal, E. Wallace, and C. Raffel, \"Deduplicating training data mitigates privacy risks in language models,\" in International Conference on Machine Learning. PMLR, 2022, pp. 10697-10707.", + "[88] N. Carlini, D. Ippolito, M. Jagielski, K. Lee, F. Tramer, and C. Zhang, “Quantifying memorization across neural language models,” in The Eleventh International Conference on Learning Representations, 2022.", + "[89] C. Arnett, E. Jones, I. P. Yamshchikov, and P.-C. Langlais, \"Toxicity of the commons: Curating open-source pre-training data,\" arXiv preprint arXiv:2410.22587, 2024.", + "[90] K. Lee, D. Ippolito, A. Nystrom, C. Zhang, D. Eck, C. Callison-Burch, and N. Carlini, “Deduplicating training data makes language models better,” arXiv preprint arXiv:2107.06499, 2021.", + "[91] Y. Li, Y. Jiang, Z. Li, and S. Xia, \"Backdoor learning: A survey.\" IEEE Transactions on Neural Networks and Learning Systems, vol. 35, no. 1, pp. 5-22, 2024.", + "[92] Y. Zeng, M. Pan, H. Jahagirdar, M. Jin, L. Lyu, and R. Jia, \"How to sift out a clean data subset in the presence of data poisoning?\" arXiv preprint arXiv:2210.06516, 2022." + ], + "bbox": [ + 76, + 53, + 491, + 941 + ], + "page_idx": 38 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[93] M. Pan, Y. Zeng, L. Lyu, X. Lin, and R. Jia, “{ASSET}: Robust backdoor data detection across a multiplicity of deep learning paradigms,” in 32nd USENIX Security Symposium (USENIX Security 23), 2023, pp. 2725–2742.", + "[94] Z. Zhang, L. Lyu, W. Wang, L. Sun, and X. Sun, \"How to inject backdoors with better consistency: Logit anchoring on clean data,\" in International Conference on Learning Representations, 2022.", + "[95] Z. Zhang, L. Lyu, X. Ma, C. Wang, and X. 
Sun, \"Fine-mixing: Mitigating backdoors in fine-tuned language models,\" arXiv preprint arXiv:2210.09545, 2022.", + "[96] X. Sun, X. Li, Y. Meng, X. Ao, L. Lyu, J. Li, and T. Zhang, \"Defending against backdoor attacks in natural language generation,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 37, no. 4, 2023, pp. 5257-5265.", + "[97] S. Longpre, G. Yauney, E. Reif, K. Lee, A. Roberts, B. Zoph, D. Zhou, J. Wei, K. Robinson, D. Mimno et al., \"A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity,\" in Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), 2024, pp. 3245-3276.", + "[98] S. Neel and P. Chang, \"Privacy issues in large language models: A survey,\" arXiv preprint arXiv:2312.06717, 2023.", + "[99] X. Wu, R. Duan, and J. Ni, \"Unveiling security, privacy, and ethical concerns of chatgpt,\" Journal of Information and Intelligence, vol. 2, no. 2, pp. 102-115, 2024.", + "[100] M. Gupta, C. Akiri, K. Aryal, E. Parker, and L. Praharaj, \"From chatgpt to threatgpt: Impact of generative ai in cybersecurity and privacy,\" IEEE Access, vol. 11, pp. 80218-80245, 2023.", + "[101] M. Miranda, E. S. Ruzzetti, A. Santilli, F. M. Zanzotto, S. Bratières, and E. Rodolà, “Preserving privacy in large language models: A survey on current threats and solutions,” arXiv preprint arXiv:2408.05212, 2024.", + "[102] Q. Zhang, H. Qiu, D. Wang, Y. Li, T. Zhang, W. Zhu, H. Weng, L. Yan, and C. Zhang, “A benchmark for semantic sensitive information in llms outputs,” in The Thirteenth International Conference on Learning Representations, 2025.", + "[103] S. Kim, S. Yun, H. Lee, M. Gubri, S. Yoon, and S. J. Oh, \"Propile: C,\" Advances in Neural Information Processing Systems, vol. 36, pp. 20750-20762, 2023.", + "[104] H. Li, D. Guo, W. Fan, M. Xu, J. Huang, F. Meng, and Y. 
Song, \"Multi-step jailbreaking privacy attacks on chatgpt,\" arXiv preprint arXiv:2304.05197, 2023.", + "[105] M. S. Ozdayi, C. Peris, J. FitzGerald, C. Dupuy, J. Majmudar, H. Khan, R. Parikh, and R. Gupta, \"Controlling the extraction of memorized data from large language models via prompt-tuning,\" arXiv preprint arXiv:2305.11759, 2023.", + "[106] N. Carlini, C. Liu, U. Erlingsson, J. Kos, and D. Song, \"The secret sharer: Evaluating and testing unintended memorization in neural networks,\" in 28th USENIX security symposium (USENIX security 19), 2019, pp. 267-284.", + "[107] M. Nasr, N. Carlini, J. Hayase, M. Jagielski, A. F. Cooper, D. Ippolito, C. A. Choquette-Choo, E. Wallace," + ], + "bbox": [ + 506, + 53, + 921, + 941 + ], + "page_idx": 38 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 38 + }, + { + "type": "page_number", + "text": "39", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 38 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "F. Tramér, and K. Lee, \"Scalable extraction of training data from (production) language models,\" arXiv preprint arXiv:2311.17035, 2023.", + "[108] N. Carlini, F. Tramer, E. Wallace, M. Jagielski, A. Herbert-Voss, K. Lee, A. Roberts, T. Brown, D. Song, U. Erlingsson et al., \"Extracting training data from large language models,\" in 30th USENIX security symposium (USENIX Security 21), 2021, pp. 2633-2650.", + "[109] Y. Bai, G. Pei, J. Gu, Y. Yang, and X. Ma, \"Special characters attack: Toward scalable training data extraction from large language models,\" arXiv preprint arXiv:2405.05990, 2024.", + "[110] Z. Zhou, J. Xiang, C. Chen, and S. Su, “Quantifying and analyzing entity-level memorization in large language models,” in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 17, 2024, pp. 19741-19749.", + "[111] X. Yang, Z. Wen, W. Qu, Z. Chen, Z. Xiang, B. 
Chen, and H. Yao, “Memorization and privacy risks in domain-specific large language models,” in ICLR 2024 Workshop on Reliable and Responsible Foundation Models, 2024.", + "[112] R. Shokri, M. Stronati, C. Song, and V. Shmatikov, \"Membership inference attacks against machine learning models,\" in 2017 IEEE symposium on security and privacy (SP). IEEE, 2017, pp. 3-18.", + "[113] H. Hu, Z. Salcic, L. Sun, G. Dobbie, P. S. Yu, and X. Zhang, \"Membership inference attacks on machine learning: A survey,\" ACM Computing Surveys (CSUR), vol. 54, no. 11s, pp. 1-37, 2022.", + "[114] N. Carlini, S. Chien, M. Nasr, S. Song, A. Terzis, and F. Tramer, \"Membership inference attacks from first principles,\" in 2022 IEEE symposium on security and privacy (SP). IEEE, 2022, pp. 1897-1914.", + "[115] J. Ye, A. Maddi, S. K. Murakonda, V. Bindschaedler, and R. Shokri, \"Enhanced membership inference attacks against machine learning models,\" in Proceedings of the 2022 ACM SIGSAC Conference on Computer and Communications Security, 2022, pp. 3093-3106.", + "[116] J. Zhang, D. Das, G. Kamath, and F. Tramère, \"Membership inference attacks cannot prove that a model was trained on your data,\" arXiv preprint arXiv:2409.19798, 2024.", + "[117] M. Duan, A. Suri, N. Mireshghallah, S. Min, W. Shi, L. Zettlemoyer, Y. Tsvetkov, Y. Choi, D. Evans, and H. Hajishirzi, \"Do membership inference attacks work on large language models?\" arXiv preprint arXiv:2402.07841, 2024.", + "[118] M. Meeus, I. Shilov, S. Jain, M. Faysse, M. Rei, and Y.-A. de Montjoye, \"Sok: Membership inference attacks on llms are rushing nowhere (and how to fix it),\" arXiv preprint arXiv:2406.17975, 2024.", + "[119] Y. He, B. Li, Y. Wang, M. Yang, J. Wang, H. Hu, and X. Zhao, \"Is difficulty calibration all we need? towards more practical membership inference attacks,\" in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 1226-1240.", + "[120] Y. He, B. Li, L. Liu, Z. Ba, W. 
Dong, Y. Li, Z. Qin, K. Ren, and C. Chen, \"Towards label-only membership inference attack against pre-trained large lan" + ], + "bbox": [ + 76, + 53, + 491, + 943 + ], + "page_idx": 39 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "guage models,\" in USENIX Security, 2025.", + "[121] J. Ren, K. Chen, C. Chen, V. Sehwag, Y. Xing, J. Tang, and L. Lyu, \"Self-comparison for dataset-level membership inference in large (vision-) language model,\" in Proceedings of the ACM on Web Conference 2025, 2025, pp. 910-920.", + "[122] A. Albalak, Y. Elazar, S. M. Xie, S. Longpre, N. Lambert, X. Wang, N. Muennighoff, B. Hou, L. Pan, H. Jeong et al., \"A survey on data selection for language models,\" arXiv preprint arXiv:2402.16827, 2024.", + "[123] P. Maini, S. Goyal, D. Sam, A. Robey, Y. Savani, Y. Jiang, A. Zou, Z. C. Lipton, and J. Z. Kolter, \"Safety pretraining: Toward the next generation of safe ai,\" arXiv preprint arXiv:2504.16980, 2025.", + "[124] A. Hurst, A. Lerer, A. P. Goucher, A. Perelman, A. Ramesh, A. Clark, A. Ostrow, A. Welihinda, A. Hayes, A. Radford et al., \"Gpt-4o system card,\" arXiv preprint arXiv:2410.21276, 2024.", + "[125] S. Li, F. Liu, L. Cui, J. Lu, Q. Xiao, X. Yang, P. Liu, K. Sun, Z. Ma, and X. Wang, \"Safe planner: Empowering safety awareness in large pre-trained models for robot task planning,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 39, no. 14, 2025, pp. 14619-14627.", + "[126] J. O'Neill, S. Subramanian, E. Lin, A. Satish, and V. Mugunthan, \"Guardformer: Guardrail instruction pretraining for efficient safeguarding,\" in Neurips Safe Generative AI Workshop 2024.", + "[127] T. Huang, S. Hu, F. Ilhan, S. F. Tekin, and L. Liu, \"Harmful fine-tuning attacks and defenses for large language models: A survey,\" arXiv preprint arXiv:2409.18169, 2024.", + "[128] M. Shu, J. Wang, C. Zhu, J. Geiping, C. Xiao, and T. 
Goldstein, \"On the exploitability of instruction tuning,\" Advances in Neural Information Processing Systems, vol. 36, pp. 61-836-61-856, 2023.", + "[129] J. Xu, M. D. Ma, F. Wang, C. Xiao, and M. Chen, \"Instructions as backdoors: Backdoor vulnerabilities of instruction tuning for large language models,\" arXiv preprint arXiv:2305.14710, 2023.", + "[130] J. Yan, V. Yadav, S. Li, L. Chen, Z. Tang, H. Wang, V. Srinivasan, X. Ren, and H. Jin, \"Backdooring instruction-tuned large language models with virtual prompt injection,\" arXiv preprint arXiv:2307.16888, 2023.", + "[131] H. Yao, J. Lou, and Z. Qin, \"Poisonprompt: Backdoor attack on prompt-based large language models,\" in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 7745-7749.", + "[132] S. Zhao, J. Wen, L. A. Tuan, J. Zhao, and J. Fu, \"Prompt as triggers for backdoor attack: Examining the vulnerability in language models,\" arXiv preprint arXiv:2305.01219, 2023.", + "[133] Z. Han, C. Gao, J. Liu, J. Zhang, and S. Q. Zhang, \"Parameter-efficient fine-tuning for large models: A comprehensive survey,\" arXiv preprint arXiv:2403.14608, 2024.", + "[134] L. Xu, H. Xie, S.-Z. J. Qin, X. Tao, and F. L. Wang, \"Parameter-efficient fine-tuning methods for pretrained language models: A critical review and" + ], + "bbox": [ + 506, + 53, + 921, + 943 + ], + "page_idx": 39 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 39 + }, + { + "type": "page_number", + "text": "40", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 39 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "assessment,\" arXiv preprint arXiv:2312.12148, 2023.", + "[135] N. Ding, Y. Qin, G. Yang, F. Wei, Z. Yang, Y. Su, S. Hu, Y. Chen, C.-M. Chan, W. 
Chen et al., \"Parameter-efficient fine-tuning of large-scale pre-trained language models,\" Nature Machine Intelligence, vol. 5, no. 3, pp. 220-235, 2023.", + "[136] S. Zhao, L. Gan, L. A. Tuan, J. Fu, L. Lyu, M. Jia, and J. Wen, \"Defending against weight-poisoning backdoor attacks for parameter-efficient fine-tuning,\" arXiv preprint arXiv:2402.12168, 2024.", + "[137] J. Kim, M. Song, S. H. Na, and S. Shin, \"Obliviate: Neutralizing task-agnostic backdoors within the parameter-efficient fine-tuning paradigm,\" arXiv preprint arXiv:2409.14119, 2024.", + "[138] S. Jiang, S. R. Kadhe, Y. Zhou, F. Ahmed, L. Cai, and N. Baracaldo, \"Turning generative models degenerate: The power of data poisoning attacks,\" arXiv preprint arXiv:2407.12281, 2024.", + "[139] T. Li, A. K. Sahu, A. Talwalkar, and V. Smith, \"Federated learning: Challenges, methods, and future directions,\" IEEE signal processing magazine, vol. 37, no. 3, pp. 50-60, 2020.", + "[140] C. Zhang, Y. Xie, H. Bai, B. Yu, W. Li, and Y. Gao, \"A survey on federated learning,\" Knowledge-Based Systems, vol. 216, p. 106775, 2021.", + "[141] L. Li, Y. Fan, M. Tse, and K.-Y. Lin, \"A review of applications in federated learning,\" Computers & Industrial Engineering, vol. 149, p. 106854, 2020.", + "[142] Z. Wang, Z. Shen, Y. He, G. Sun, H. Wang, L. Lyu, and A. Li, \"Flora: Federated fine-tuning large language models with heterogeneous low-rank adaptations,\" arXiv preprint arXiv:2409.05976, 2024.", + "[143] C. Chen, X. Feng, Y. Li, L. Lyu, J. Zhou, X. Zheng, and J. Yin, \"Integration of large language models and federated learning,\" *Patterns*, vol. 5, no. 12, 2024.", + "[144] W. Zhuang, C. Chen, and L. Lyu, \"When foundation model meets federated learning: Motivations, challenges, and future directions,\" arXiv preprint arXiv:2306.15546, 2023.", + "[145] G. Sun, Y. Cong, J. Dong, Q. Wang, L. Lyu, and J. Liu, \"Data poisoning attacks on federated machine learning,\" IEEE Internet of Things Journal, vol. 9, no. 
13, pp. 11365-11375, 2021.", + "[146] L. Lyu, H. Yu, X. Ma, C. Chen, L. Sun, J. Zhao, Q. Yang, and P. S. Yu, \"Privacy and robustness in federated learning: Attacks and defenses,\" IEEE transactions on neural networks and learning systems, vol. 35, no. 7, pp. 8726-8746, 2022.", + "[147] R. Ye, J. Chai, X. Liu, Y. Yang, Y. Wang, and S. Chen, \"Emerging safety attack and defense in federated instruction tuning of large language models,\" arXiv preprint arXiv:2406.10630, 2024.", + "[148] Z. Zhang, A. Panda, L. Song, Y. Yang, M. Mahoney, P. Mittal, R. Kannan, and J. Gonzalez, \"Neurotoxin: Durable backdoors in federated learning,\" in International Conference on Machine Learning. PMLR, 2022, pp. 26429-26446.", + "[149] T. Fu, M. Sharma, P. Torr, S. B. Cohen, D. Krueger, and F. Berez, “Poisonbench: Assessing large language model vulnerability to data poisoning,” arXiv preprint arXiv:2410.08811, 2024." + ], + "bbox": [ + 76, + 53, + 491, + 941 + ], + "page_idx": 40 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[150] P. Pathmanathan, S. Chakraborty, X. Liu, Y. Liang, and F. Huang, \"Is poisoning a real threat to llm alignment? maybe more so than you think,\" arXiv preprint arXiv:2406.12091, 2024.", + "[151] A. Wan, E. Wallace, S. Shen, and D. Klein, “Poisoning language models during instruction tuning,” in International Conference on Machine Learning. PMLR, 2023, pp. 35413-35425.", + "[152] J. Rando and F. Tramer, \"Universal jailbreak backdoors from poisoned human feedback,\" arXiv preprint arXiv:2311.14455, 2023.", + "[153] T. Baumgartner, Y. Gao, D. Alon, and D. Metzler, \"Best-of-venom: Attacking rlhf by injecting poisoned preference data,\" arXiv preprint arXiv:2404.05530, 2024.", + "[154] B. Chen, H. Guo, G. Wang, Y. Wang, and Q. Yan, \"The dark side of human feedback: Poisoning large language models via user inputs,\" arXiv preprint arXiv:2409.00787, 2024.", + "[155] Y. Bai, A. Jones, K. Ndousse, A. Askell, A. Chen, N. DasSarma, D. 
Drain, S. Fort, D. Ganguli, T. Henighan et al., \"Training a helpful and harmless assistant with reinforcement learning from human feedback,\" arXiv preprint arXiv:2204.05862, 2022.", + "[156] H. Dong, W. Xiong, B. Pang, H. Wang, H. Zhao, Y. Zhou, N. Jiang, D. Sahoo, C. Xiong, and T. Zhang, \"Rlhf workflow: From reward modeling to online rlhf,\" arXiv preprint arXiv:2405.07863, 2024.", + "[157] W. Xiong, H. Dong, C. Ye, Z. Wang, H. Zhong, H. Ji, N. Jiang, and T. Zhang, \"Iterative preference learning from human feedback: Bridging theory and practice for rlhf under kl-constraint,\" arXiv preprint arXiv:2312.11456, 2023.", + "[158] H. Lee, S. Phatale, H. Mansoor, K. R. Lu, T. Mesnard, J. Ferret, C. Bishop, E. Hall, V. Carbune, and A. Rastogi, \"Rlaif: Scaling reinforcement learning from human feedback with ai feedback,\" 2023.", + "[159] R. Rafailov, A. Sharma, E. Mitchell, C. D. Manning, S. Ermon, and C. Finn, \"Direct preference optimization: Your language model is secretly a reward model,\" Advances in Neural Information Processing Systems, vol. 36, pp. 53728-53741, 2023.", + "[160] J. Wang, J. Wu, M. Chen, Y. Vorobeychik, and C. Xiao, \"Rlhfpoison: Reward poisoning attack for reinforcement learning with human feedback in large language models,\" arXiv preprint arXiv:2311.09641, 2023.", + "[161] S. Gunasekar, Y. Zhang, J. Aneja, C. C. T. Mendes, A. Del Giorno, S. Gopi, M. Javaheripi, P. Kauffmann, G. de Rosa, O. Saarikivi et al., \"Textbooks are all you need,\" arXiv preprint arXiv:2306.11644, 2023.", + "[162] Y. Li, S. Bubeck, R. Eldan, A. Del Giorno, S. Gunasekar, and Y. T. Lee, \"Textbooks are all you need ii: phi-1.5 technical report,\" arXiv preprint arXiv:2309.05463, 2023.", + "[163] J. Zhan, J. Dai, J. Ye, Y. Zhou, D. Zhang, Z. Liu, X. Zhang, R. Yuan, G. Zhang, L. Li et al., \"Anygpt: Unified multimodal llm with discrete sequence modeling,\" arXiv preprint arXiv:2402.12226, 2024.", + "[164] H. Wang, C. Liu, N. Xi, Z. Qiang, S. Zhao, B. Qin, and T. 
Liu, \"Huatuo: Tuning llama model with chinese medical knowledge,\" arXiv preprint arXiv:2304.06975, 2023." + ], + "bbox": [ + 506, + 53, + 921, + 941 + ], + "page_idx": 40 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 40 + }, + { + "type": "page_number", + "text": "41", + "bbox": [ + 906, + 32, + 919, + 42 + ], + "page_idx": 40 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[165] P. Sutanto, J. Santoso, E. I. Setiawan, and A. P. Wibawa, \"Llm distillation for efficient few-shot multiple choice question answering,\" arXiv preprint arXiv:2412.09807, 2024.", + "[166] X. Zhu, J. Li, Y. Liu, C. Ma, and W. Wang, \"Distilling mathematical reasoning capabilities into small language models,\" Neural Networks, vol. 179, p. 106594, 2024.", + "[167] R. Xu, H. Cui, Y. Yu, X. Kan, W. Shi, Y. Zhuang, W. Jin, J. Ho, and C. Yang, \"Knowledge-infused prompting: Assessing and advancing clinical text data generation with large language models,\" arXiv preprint arXiv:2311.00287, 2023.", + "[168] N. Crispino, K. Montgomery, F. Zeng, D. Song, and C. Wang, \"Agent instructs large language models to be general zero-shot reasoners,\" arXiv preprint arXiv:2310.03710, 2023.", + "[169] C. Li, C. Zhang, Y. Lu, J. Zhang, Q. Sun, X. Wang, J. Wei, G. Wang, Y. Yang, and H. T. Shen, \"Syzygy of thoughts: Improving llm cot with the minimal free resolution,\" arXiv preprint arXiv:2504.09566, 2025.", + "[170] Z. Chen, K. Liu, Q. Wang, W. Zhang, J. Liu, D. Lin, K. Chen, and F. Zhao, \"Agent-flan: Designing data and methods of effective agent tuning for large language models,\" arXiv preprint arXiv:2403.12881, 2024.", + "[171] C. Xu, Q. Sun, K. Zheng, X. Geng, P. Zhao, J. Feng, C. Tao, and D. Jiang, \"Wizardlm: Empowering large language models to follow complex instructions,\" arXiv preprint arXiv:2304.12244, 2023.", + "[172] S. Mukherjee, A. Mitra, G. Jawahar, S. 
Agarwal, H. Palangi, and A. Awadallah, \"Orca: Progressive learning from complex explanation traces of gpt-4,\" arXiv preprint arXiv:2306.02707, 2023.", + "[173] Y. Wang, Y. Kordi, S. Mishra, A. Liu, N. A. Smith, D. Khashabi, and H. Hajishirzi, \"Self-instruct: Aligning language models with self-generated instructions,\" arXiv preprint arXiv:2212.10560, 2022.", + "[174] R. Ri, S. Kiyono, and S. Takase, \"Self-translatabrain: Enhancing cross-lingual transfer of large language models via inherent capability,\" arXiv preprint arXiv:2407.00454, 2024.", + "[175] J. Ji, M. Liu, J. Dai, X. Pan, C. Zhang, C. Bian, B. Chen, R. Sun, Y. Wang, and Y. Yang, \"Beavertails: Towards improved safety alignment of llm via a human-preference dataset,\" Advances in Neural Information Processing Systems, vol. 36, pp. 24678-24704, 2023.", + "[176] H. Lightman, V. Kosaraju, Y. Burda, H. Edwards, B. Baker, T. Lee, J. Leike, J. Schulman, I. Sutskever, and K. Cobbe, \"Let's verify step by step,\" in The Twelfth International Conference on Learning Representations, 2023.", + "[177] R. Nakano, J. Hilton, S. Balaji, J. Wu, L. Ouyang, C. Kim, C. Hesse, S. Jain, V. Kosaraju, W. Saunders et al., \"Webgpt: Browser-assisted question-answering with human feedback,\" arXiv preprint arXiv:2112.09332, 2021.", + "[178] C. Chen, J. Fu, and L. Lyu, \"A pathway towards responsible ai generated content,\" arXiv preprint arXiv:2303.01325, 2023.", + "[179] A. Akkus, M. P. Aghdam, M. Li, J. Chu, M. Backes, Y. Zhang, and S. Sav, \"Generated data with fake privacy: Hidden dangers of fine-tuning large lan" + ], + "bbox": [ + 76, + 53, + 491, + 943 + ], + "page_idx": 41 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "guage models on generated data,\" arXiv preprint arXiv:2409.11423, 2024.", + "[180] Y. Song, J. Zhang, Z. Tian, Y. Yang, M. Huang, and D. 
Li, \"Llm-based privacy data augmentation guided by knowledge distillation with a distribution tutor for medical text classification,\" arXiv preprint arXiv:2402.16515, 2024.", + "[181] A. Kang, J. Y. Chen, Z. Lee-Youngzie, and S. Fu, \"Synthetic data generation with llm for improved depression prediction,\" arXiv preprint arXiv:2411.17672, 2024.", + "[182] A. Taubenfeld, Y. Dover, R. Reichart, and A. Goldstein, \"Systematic biases in llm simulations of debates,\" arXiv preprint arXiv:2402.04049, 2024.", + "[183] A. Mishra, G. Nayak, S. Bhattacharya, T. Kumar, A. Shah, and M. Foltin, \"Llm-guided counterfactual data generation for fairer ai,\" in Companion Proceedings of the ACM Web Conference 2024, 2024, pp. 1538-1545.", + "[184] Y. Yu, Y. Zhuang, J. Zhang, Y. Meng, A. J. Ratner, R. Krishna, J. Shen, and C. Zhang, \"Large language model as attributed training data generator: A tale of diversity and bias,\" Advances in Neural Information Processing Systems, vol. 36, pp. 55734-55784, 2023.", + "[185] A. Borah and R. Mihalcea, \"Towards implicit bias detection and mitigation in multi-agent lvm interactions,\" arXiv preprint arXiv:2410.02584, 2024.", + "[186] X. Dong, Y. Wang, P. S. Yu, and J. Caverlee, \"Disclosure and mitigation of gender bias in llms,\" arXiv preprint arXiv:2402.11190, 2024.", + "[187] I. M. Serouis and F. Sèdes, “Exploring large language models for bias mitigation and fairness,” in 1st International Workshop on AI Governance (AIGOV) in conjunction with the Thirty-Third International Joint Conference on Artificial Intelligence, 2024.", + "[188] Y. Chen, Q. Fu, Y. Yuan, Z. Wen, G. Fan, D. Liu, D. Zhang, Z. Li, and Y. Xiao, \"Hallucination detection: Robustly discerning reliable answers in large language models,\" in Proceedings of the 32nd ACM International Conference on Information and Knowledge Management, 2023, pp. 245-255.", + "[189] N. Chakraborty, M. Ornik, and K. 
Driggs-Campbell, \"Hallucination detection in foundation models for decision-making: A flexible definition and review of the state of the art,\" ACM Computing Surveys, 2025.", + "[190] E. Entezami and A. Naseh, \"Llm misalignment via adversarial rlhf platforms,\" arXiv preprint arXiv:2503.03039, 2025.", + "[191] J. Achiam, S. Adler, S. Agarwal, L. Ahmad, I. Akkaya, F. L. Aleman, D. Almeida, J. Altenschmidt, S. Altman, S. Anadkat et al., \"Gpt-4 technical report,\" arXiv preprint arXiv:2303.08774, 2023.", + "[192] A. Young, B. Chen, C. Li, C. Huang, G. Zhang, G. Zhang, G. Wang, H. Li, J. Zhu, J. Chen et al., \"Yi: Open foundation models by 01. ai,\" arXiv preprint arXiv:2403.04652, 2024.", + "[193] A. Dubey, A. Jauhri, A. Pandey, A. Kadian, A. Al-Dahle, A. Letman, A. Mathur, A. Schelten, A. Yang, A. Fan et al., \"The llama 3 herd of models,\" arXiv preprint arXiv:2407.21783, 2024.", + "[194] Z. Cai, M. Cao, H. Chen, K. Chen, K. Chen, X. Chen, X. Chen, Z. Chen, Z. Chen, P. Chu et al., \"InternlM2" + ], + "bbox": [ + 506, + 53, + 921, + 941 + ], + "page_idx": 41 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 41 + }, + { + "type": "page_number", + "text": "42", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 41 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "technical report,\" arXiv preprint arXiv:2403.17297, 2024.", + "[195] R. Anil, A. M. Dai, O. First, M. Johnson, D. Lepikhin, A. Passos, S. Shakeri, E. Taropa, P. Bailey, Z. Chen et al., \"Palm 2 technical report,\" arXiv preprint arXiv:2305.10403, 2023.", + "[196] T. GLM, A. Zeng, B. Xu, B. Wang, C. Zhang, D. Yin, D. Zhang, D. Rojas, G. Feng, H. Zhao et al., \"Chatglm: A family of large language models from glm-130b to glm-4 all tools,\" arXiv preprint arXiv:2406.12793, 2024.", + "[197] G. Team, R. Anil, S. Borgeaud, J.-B. Alayrac, J. Yu, R. Soricut, J. Schalkwyk, A. M. 
Dai, A. Hauth, K. Millican et al., \"Gemini: a family of highly capable multimodal models,\" arXiv preprint arXiv:2312.11805, 2023.", + "[198] G. Team, T. Mesnard, C. Hardin, R. Dadashi, S. Bhupatiraju, S. Pathak, L. Sifre, M. Rivière, M. S. Kale, J. Love et al., \"Gemma: Open models based on gemini research and technology,\" arXiv preprint arXiv:2403.08295, 2024.", + "[199] D. Groeneveld, I. Beltagy, P. Walsh, A. Bhagia, R. Kinney, O. Tafjord, A. H. Jha, H. Ivison, I. Magnusson, Y. Wang et al., \"Olmo: Accelerating the science of language models,\" arXiv preprint arXiv:2402.00838, 2024.", + "[200] B. Adler, N. Agarwal, A. Aithal, D. H. Anh, P. Bhattacharya, A. Brundyn, J. Casper, B. Catanzaro, S. Clay, J. Cohen et al., \"Nemotron-4 340b technical report,\" arXiv preprint arXiv:2406.11704, 2024.", + "[201] A. Jaech, A. Kalai, A. Lerer, A. Richardson, A. El-Kishky, A. Low, A. Helyar, A. Madry, A. Beutel, A. Carney et al., \"Openai o1 system card,\" arXiv preprint arXiv:2412.16720, 2024.", + "[202] OpenAI, \"Gpt-4o mini: advancing cost-efficient intelligence,\" 2024, https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence.", + "[203] A. Yang, B. Xiao, B. Wang, B. Zhang, C. Bian, C. Yin, C. Lv, D. Pan, D. Wang, D. Yan et al., \"Baichuan 2: Open large-scale language models,\" arXiv preprint arXiv:2309.10305, 2023.", + "[204] J. Welbl, A. Glaese, J. Uesato, S. Dathathri, J. Mellor, L. A. Hendricks, K. Anderson, P. Kohli, B. Coppin, and P.-S. Huang, \"Challenges in detoxifying language models,\" in Findings of the Association for Computational Linguistics: EMNLP 2021, 2021, pp. 2447-2469.", + "[205] H. Ngo, C. Raterink, J. G. Araújo, I. Zhang, C. Chen, A. Morisot, and N. Frosst, \"Mitigating harm in language models with conditional-likelihood filtration,\" arXiv preprint arXiv:2108.07790, 2021.", + "[206] Y. Chen, W. Cai, L. Wu, X. Li, Z. Xin, and C. 
Fu, \"Tigerbot: An open multilingual multitask llm,\" arXiv preprint arXiv:2312.08688, 2023.", + "[207] S. Prabhumoye, M. Patwary, M. Shoeybi, and B. Catanzaro, \"Adding instructions during pretraining: Effective way of controlling toxicity in language models,\" in Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics, 2023, pp. 2636-2651.", + "[208] Y. Ge, W. Sun, Y. Lou, C. Fang, Y. Zhang, Y. Li, X. Zhang, Y. Liu, Z. Zhao, and Z. Chen, \"Demonstration attack against in-context learning for code intelligence,\" CoRR, vol. abs/2410.02841, no. 1, pp. 1-17, 2024." + ], + "bbox": [ + 75, + 51, + 491, + 941 + ], + "page_idx": 42 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[209] G. Team, P. Georgiev, V. I. Lei, R. Burnell, L. Bai, A. Gulati, G. Tanzer, D. Vincent, Z. Pan, S. Wang et al., \"Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context,\" arXiv preprint arXiv:2403.05530, 2024.", + "[210] J. Parmar, S. Prabhumoye, J. Jennings, M. Patwary, S. Subramanian, D. Su, C. Zhu, D. Narayanan, A. Jhunjunwala, A. Dattagupta et al., \"Nemotron-4 15b technical report,\" arXiv preprint arXiv:2402.16819, 2024.", + "[211] C. Raffel, N. Shazeer, A. Roberts, K. Lee, S. Narang, M. Matena, Y. Zhou, W. Li, and P. J. Liu, \"Exploring the limits of transfer learning with a unified text-to-text transformer,\" Journal of machine learning research, vol. 21, no. 140, pp. 1-67, 2020.", + "[212] T. Markov, C. Zhang, S. Agarwal, F. E. Nekoul, T. Lee, S. Adler, A. Jiang, and L. Weng, “A holistic approach to undesired content detection in the real world,” in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 37, no. 12, 2023, pp. 15009-15018.", + "[213] A. Dubey, A. Jauhri, A. Pandey, A. Kadian, A. Al-Dahle, A. Letman, A. Mathur, A. Schelten, A. Yang, A. Fan et al., \"The llama 3 herd of models,\" arXiv preprint arXiv:2407.21783, 2024.", + "[214] T. Huang, S. Hu, F. 
Ilhan, S. F. Tekin, and L. Liu, \"Harmful fine-tuning attacks and defenses for large language models: A survey,\" arXiv preprint arXiv:2409.18169, 2024.", + "[215] J. Wu, Y. Xie, Z. Yang, J. Wu, J. Chen, J. Gao, B. Ding, X. Wang, and X. He, \"Towards robust alignment of language models: Distributionally robustifying direct preference optimization,\" arXiv preprint arXiv:2407.07880, 2024.", + "[216] Z. Xu, S. Vemuri, K. Panaganti, D. Kalathil, R. Jain, and D. Ramachandran, \"Distributionally robust direct preference optimization,\" arXiv preprint arXiv:2502.01930, 2025.", + "[217] J. Dai, X. Pan, R. Sun, J. Ji, X. Xu, M. Liu, Y. Wang, and Y. Yang, \"Safe rlhf: Safe reinforcement learning from human feedback,\" in The Twelfth International Conference on Learning Representations, 2023.", + "[218] C. O. Retzlaff, S. Das, C. Wayllace, P. Mousavi, M. Afshari, T. Yang, A. Saranti, A. Angerschmid, M. E. Taylor, and A. Holzinger, \"Human-in-the-loop reinforcement learning: A survey and position on requirements, challenges, and opportunities,\" Journal of Artificial Intelligence Research, vol. 79, pp. 359-415, 2024.", + "[219] S. Milani, N. Topin, M. Veloso, and F. Fang, \"Explainable reinforcement learning: A survey and comparative review,\" ACM Computing Surveys, vol. 56, no. 7, pp. 1-36, 2024.", + "[220] A. Ahmadian, C. Cremer, M. Galle, M. Fadaee, J. Kreutzer, O. Pietquin, A. Üstün, and S. Hooker, \"Back to basics: Revisiting reinforce style optimization for learning from human feedback in llms,\" arXiv preprint arXiv:2402.14740, 2024.", + "[221] T. Liu, Z. Qin, J. Wu, J. Shen, M. Khalman, R. Joshi, Y. Zhao, M. Saleh, S. Baumgartner, J. Liu et al., \"Lipo: Listwise preference optimization through learning-torank,\" arXiv preprint arXiv:2402.01878, 2024.", + "[222] F. Song, B. Yu, M. Li, H. Yu, F. Huang, Y. Li, and" + ], + "bbox": [ + 506, + 53, + 921, + 943 + ], + "page_idx": 42 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 42 + }, + { + "type": "page_number", + "text": "43", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 42 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "H. Wang, \"Preference ranking optimization for human alignment,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 17, 2024, pp. 18990-18998.", + "[223] Z. Wang, B. Bi, S. K. Pentyala, K. Ramnath, S. Chaudhuri, S. Mehrotra, X.-B. Mao, S. Asur et al., \"A comprehensive survey of llm alignment techniques: Rlhf, rlaif, ppo, dpo and more,\" arXiv preprint arXiv:2407.16216, 2024.", + "[224] T. Huang, S. Hu, F. Ilhan, S. F. Tekin, and L. Liu, \"Lisa: Lazy safety alignment for large language models against harmful fine-tuning attack,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=RPChapuXIC", + "[225] T. Huang, S. Hu, and L. Liu, \"Vaccine: Perturbation-aware alignment for large language models against harmful fine-tuning attack,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=lpXDZKiAnt", + "[226] J. Wang, J. Li, Y. Li, X. Qi, J. Hu, Y. Li, P. McDaniel, M. Chen, B. Li, and C. Xiao, \"Backdooralign: Mitigating fine-tuning based jailbreak attack with backdoor enhanced safety alignment,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=1PcjJ5Evta7", + "[227] F. Bianchi, M. Suzgun, G. Attanasio, P. Rottger, D. Jurafsky, T. Hashimoto, and J. Zou, \"Safety-tuned LLaMAs: Lessons from improving the safety of large language models that follow instructions,\" in The Twelfth International Conference on Learning Representations, 2024. [Online]. Available: https://openreview.net/forum?id=gT5hALch9z", + "[228] H. Shen, P.-Y. 
Chen, P. Das, and T. Chen, \"SEAL: Safety-enhanced aligned LLM fine-tuning via bilevel data selection,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=VHguhvcoM5", + "[229] R. Tang, J. Yuan, Y. Li, Z. Liu, R. Chen, and X. Hu, \"Setting the trap: Capturing and defeating backdoor threats in plms through honeypots,\" NeurIPS, 2023.", + "[230] C.-Y. Hsu, Y.-L. Tsai, C.-H. Lin, P.-Y. Chen, C.-M. Yu, and C.-Y. Huang, \"Safe loRA: The silver lining of reducing safety risks when finetuning large language models,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=HcifdQZFV", + "[231] R. Hazra, S. Layek, S. Banerjee, and S. Poria, \"Safety arithmetic: A framework for test-time safety alignment of language models by steering parameters and activations,\" in Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, 2024, pp. 21759-21776.", + "[232] Y. Du, S. Zhao, D. Zhao, M. Ma, Y. Chen, L. Huo, Q. Yang, D. Xu, and B. Qin, \"MoGU: A framework for enhancing safety of LLMs while preserving their usability,\" in The Thirty-" + ], + "bbox": [ + 76, + 53, + 491, + 941 + ], + "page_idx": 43 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=SrFbgIjb53", + "[233] X. Yi, S. Zheng, L. Wang, G. de Melo, X. Wang, and L. He, \"Nlsr: Neuron-level safety realignment of large language models against harmful fine-tuning,\" arXiv preprint arXiv:2412.12497, 2024.", + "[234] D. Shi, T. Shen, Y. Huang, Z. Li, Y. Leng, R. Jin, C. Liu, X. Wu, Z. Guo, L. Yu et al., \"Large language model safety: A holistic survey,\" arXiv preprint arXiv:2412.17686, 2024.", + "[235] B. Ni, Z. Liu, L. Wang, Y. Lei, Y. Zhao, X. Cheng, Q. Zeng, L. Dong, Y. Xia, K. 
Kenthapadi et al., \"Towards trustworthy retrieval augmented generation for large language models: A survey,\" arXiv preprint arXiv:2502.06872, 2025.", + "[236] F. Berez, T. Fu, A. Prabhu, S. Casper, A. Sanyal, A. Bibi, A. O'Gara, R. Kirk, B. Bucknall, T. Fist, L. Ong, P. Torr, K. Lam, R. Trager, D. Krueger, S. Mindermann, J. Hernández-Orallo, M. Geva, and Y. Gal, \"Open problems in machine unlearning for AI safety,\" CoRR, 2025.", + "[237] U. Anwar, A. Saparov, J. Rando, D. Paleka, M. Turpin, P. Hase, E. S. Lubana, E. Jenner, S. Casper, O. Sourbut et al., “Foundational challenges in assuring alignment and safety of large language models,” arXiv preprint arXiv:2404.09932, 2024.", + "[238] X. Qi, Y. Zeng, T. Xie, P.-Y. Chen, R. Jia, P. Mittal, and P. Henderson, \"Fine-tuning aligned language models compromises safety, even when users do not intend to!\" arXiv preprint arXiv:2310.03693, 2023.", + "[239] X. Yang, X. Wang, Q. Zhang, L. Petzold, W. Y. Wang, X. Zhao, and D. Lin, \"Shadow alignment: The ease of subverting safely-aligned language models.(2023),\" arXiv preprint arXiv:2310.02949, 2023.", + "[240] Q. Zhan, R. Fang, R. Bindu, A. Gupta, T. Hashimoto, and D. Kang, \"Removing rlhf protections in gpt-4 via fine-tuning,\" arXiv preprint arXiv:2311.05553, 2023.", + "[241] J. Kazdan, L. Yu, R. Schaeffer, C. Cundy, S. Koyejo, and D. Krishnamurthy, \"No, of course i can! refusal mechanisms can be exploited using harmless finetuning data,\" arXiv preprint arXiv:2502.19537, 2025.", + "[242] D. Halawi, A. Wei, E. Wallace, T. T. Wang, N. Haghtalab, and J. Steinhardt, \"Covert malicious finetuning: Challenges in safeguarding llm adaptation,\" arXiv preprint arXiv:2406.20053, 2024.", + "[243] T. Huang, S. Hu, F. Ilhan, S. F. Tekin, and L. Liu, \"Virus: Harmful fine-tuning attack for large language models bypassing guardrail moderation,\" arXiv preprint arXiv:2501.17433, 2025.", + "[244] Y. Qiang, X. Zhou, S. Z. Zade, M. A. Roshani, P. Khan-duri, D. Zytko, and D. 
Zhu, \"Learning to poison large language models during instruction tuning,\" arXiv preprint arXiv:2402.13459, 2024.", + "[245] J. Raghuram, G. Kesidis, and D. J. Miller, \"A study of backdoors in instruction fine-tuned language models,\" arXiv preprint arXiv:2406.07778, 2024.", + "[246] J. Yi, R. Ye, Q. Chen, B. Zhu, S. Chen, D. Lian, G. Sun, X. Xie, and F. Wu, \"On the vulnerability of safety alignment in open-access llms,\" in Findings of the Association for Computational Linguistics ACL 2024," + ], + "bbox": [ + 506, + 53, + 921, + 941 + ], + "page_idx": 43 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 43 + }, + { + "type": "page_number", + "text": "44", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 43 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "2024, pp. 9236-9260.", + "[247] S. Lermen, C. Rogers-Smith, and J. Ladish, \"Lora finetuning efficiently undoes safety training in llama 2-chat 70b,\" arXiv preprint arXiv:2310.20624, 2023.", + "[248] L. Piercing, \"Lora-as-an-attack! piercing llm safety under the share-and-play scenario.\"", + "[249] S. Poppi, Z.-X. Yong, Y. He, B. Chern, H. Zhao, A. Yang, and J. Chi, \"Towards understanding the fragility of multilingual llms against fine-tuning attacks,\" arXiv preprint arXiv:2410.18210, 2024.", + "[250] S. Li, E. C.-H. Ngai, F. Ye, and T. Voigt, \"Peft-as-an-attack! jailbreaking language models during federated parameter-efficient fine-tuning,\" arXiv preprint arXiv:2411.19335, 2024.", + "[251] N. Razin, S. Malladi, A. Bhaskar, D. Chen, S. Arora, and B. Hanin, \"Unintentional unalignment: Likelihood displacement in direct preference optimization,\" arXiv preprint arXiv:2410.08847, 2024.", + "[252] R. Xu, Y. Cai, Z. Zhou, R. Gu, H. Weng, Y. Liu, T. Zhang, W. Xu, and H. 
Qiu, \"Course-correction: Safety alignment using synthetic preferences,\" arXiv preprint arXiv:2407.16637, 2024.", + "[253] J. Ji, B. Chen, H. Lou, D. Hong, B. Zhang, X. Pan, T. A. Qiu, J. Dai, and Y. Yang, \"Aligner: Efficient alignment by learning to correct,\" Advances in Neural Information Processing Systems, vol. 37, pp. 90853-90890, 2024.", + "[254] D. Ganguli, L. Lovitt, J. Kernion, A. Askell, Y. Bai, S. Kadavath, B. Mann, E. Perez, N. Schiefer, K. Ndousse et al., \"Red teaming language models to reduce harms: Methods, scaling behaviors, and lessons learned,\" arXiv preprint arXiv:2209.07858, 2022.", + "[255] T. Xiao, Y. Yuan, H. Zhu, M. Li, and V. G. Honavar, \"Cal-DPO: Calibrated direct preference optimization for language model alignment,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=57OQXxbTbY", + "[256] S. Guo, B. Zhang, T. Liu, T. Liu, M. Khalman, F. Llinares, A. Rame, T. Mesnard, Y. Zhao, B. Piot et al., \"Direct language model alignment from online ai feedback,\" arXiv preprint arXiv:2402.04792, 2024.", + "[257] Z. Liu, X. Sun, and Z. Zheng, \"Enhancing llm safety via constrained direct preference optimization,\" arXiv preprint arXiv:2403.02475, 2024.", + "[258] H. Lee, S. Phatale, H. Mansoor, T. Mesnard, J. Ferret, K. R. Lu, C. Bishop, E. Hall, V. Carbune, A. Rastogi, and S. Prakash, \"RLAIF vs. RLHF: Scaling reinforcement learning from human feedback with AI feedback,\" in *Forty-first International Conference on Machine Learning*, 2024. [Online]. Available: https://openreview.net/forum?id=uydQ2W41KO", + "[259] X. Lu, B. Yu, Y. Lu, H. Lin, H. Yu, L. Sun, X. Han, and Y. Li, \"Sofa: Shielded on-the-fly alignment via priority rule following,\" in Findings of the Association for Computational Linguistics ACL 2024, 2024, pp. 7108-7136.", + "[260] A. Zou, Z. Wang, N. Carlini, M. Nasr, J. Z. Kolter, and M. 
Fredrikson, \"Universal and transferable adversarial attacks on aligned language models,\" arXiv preprint arXiv:2307.15043, 2023.", + "[261] P. Chao, A. Robey, E. Dobriban, H. Hassani, G. J." + ], + "bbox": [ + 75, + 53, + 491, + 941 + ], + "page_idx": 44 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Pappas, and E. Wong, \"Jailbreaking black box large language models in twenty queries,\" arXiv preprint arXiv:2310.08419, 2023.", + "[262] Z. Zhou, J. Xiang, H. Chen, Q. Liu, Z. Li, and S. Su, \"Speak out of turn: Safety vulnerability of large language models in multi-turn dialogue,\" arXiv preprint arXiv:2402.17262, 2024.", + "[263] Q. Ren, H. Li, D. Liu, Z. Xie, X. Lu, Y. Qiao, L. Sha, J. Yan, L. Ma, and J. Shao, \"Derail yourself: Multi-turn llm jailbreak attack through self-discovered clues,\" arXiv preprint arXiv:2410.10700, 2024.", + "[264] X. Pang, S. Tang, R. Ye, Y. Xiong, B. Zhang, Y. Wang, and S. Chen, \"Self-alignment of large language models via monopolylogue-based social scene simulation,\" in Proceedings of the 41st International Conference on Machine Learning, 2024, pp. 39-46.", + "[265] J. Ji, D. Hong, B. Zhang, B. Chen, J. Dai, B. Zheng, T. Qiu, B. Li, and Y. Yang, \"Pku-saferlhf: Towards multi-level safety alignment for llms with human preference,\" arXiv preprint arXiv:2406.15513, 2024.", + "[266] T. Mu, A. Helyar, J. Heidecke, J. Achiam, A. Vallone, I. D. Kivlichan, M. Lin, A. Beutel, J. Schulman, and L. Weng, \"Rule based rewards for language model safety,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024.", + "[267] X. Tan, S. Shi, X. Qiu, C. Qu, Z. Qi, Y. Xu, and Y. Qi, \"Self-criticism: Aligning large language models with their understanding of helpfulness, honesty, and harmlessness,\" in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Industry Track, M. Wang and I. Zitouni, Eds. Singapore: Association for Computational Linguistics, Dec. 
2023, pp. 650-662. [Online]. Available: https://aclanthology.org/2023.emnlp-industry.62/", + "[268] M. Y. Guan, M. Joglekar, E. Wallace, S. Jain, B. Barak, A. Heylar, R. Dias, A. Vallone, H. Ren, J. Wei et al., \"Deliberative alignment: Reasoning enables safer language models,\" arXiv preprint arXiv:2412.16339, 2024.", + "[269] B. Wei, K. Huang, Y. Huang, T. Xie, X. Qi, M. Xia, P. Mittal, M. Wang, and P. Henderson, \"Assessing the brittleness of safety alignment via pruning and low-rank modifications,\" in *Forty-first International Conference on Machine Learning*, 2024. [Online]. Available: https://openreview.net/forum?id=K6xxnKN2gm", + "[270] A. Arditi, O. B. Obeso, A. Syed, D. Paleka, N. Rimsky, W. Gurnee, and N. Nanda, \"Refusal in language models is mediated by a single direction,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=pH3XAQME6c", + "[271] R. Ye, J. Chai, X. Liu, Y. Yang, Y. Wang, and S. Chen, \"Emerging safety attack and defense in federated instruction tuning of large language models,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=sYNWqQYJhz", + "[272] J. Mukhoti, Y. Gal, P. Torr, and P. K. Dokania, \"Finetuning can cripple foundation models; preserving features may be the solution,\" 2024. [Online]. Available: https://openreview.net/forum?id=VQ7Q6qdp0P" + ], + "bbox": [ + 506, + 53, + 921, + 941 + ], + "page_idx": 44 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 44 + }, + { + "type": "page_number", + "text": "45", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 44 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[273] Y. Du, S. Zhao, J. Cao, M. Ma, D. Zhao, F. FAN, T. Liu, and B. 
Qin, \"Towards secure tuning: Mitigating security risks arising from benign instruction fine-tuning,\" 2024. [Online]. Available: https://openreview.net/forum?id=Egd7Vi1EuA", + "[274] J. Li and J.-E. Kim, \"Safety alignment shouldn't be complicated,\" 2025. [Online]. Available: https://openreview.net/forum?id=9H91juqfgb", + "[275] S. Li, L. Yao, L. Zhang, and Y. Li, \"Safety layers in aligned large language models: The key to LLM security,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=kUH1yPMAn7", + "[276] Z. Zhou, H. Yu, X. Zhang, R. Xu, F. Huang, K. Wang, Y. Liu, J. Fang, and Y. Li, \"On the role of attention heads in large language model safety,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=h0Ak8A5yqw", + "[277] M. Li, W. M. Si, M. Backes, Y. Zhang, and Y. Wang, \"SaloRA: Safety-alignment preserved low-rank adaptation,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=GOoVzE9nSj", + "[278] Y. Zong, O. Bohdal, T. Yu, Y. Yang, and T. Hospedales, \"Safety fine-tuning at (almost) no cost: A baseline for vision large language models,\" in *Forty-first International Conference on Machine Learning*, 2024. [Online]. Available: https://openreview.net/forum?id=bWZKvF0g7G", + "[279] F. Eiras, A. Petrov, P. Torr, M. P. Kumar, and A. Bibi, \"Do as i do (safely): Mitigating task-specific fine-tuning risks in large language models,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=IXE5lB6ppV", + "[280] J. Luo, X. Luo, K. Ding, J. Yuan, Z. Xiao, and M. Zhang, \"Robustft: Robust supervised fine-tuning for large language models under noisy response,\" 2024. [Online]. Available: https://arxiv.org/abs/2412.14922", + "[281] K. Lyu, H. Zhao, X. 
Gu, D. Yu, A. Goyal, and S. Arora, \"Keeping LLMs aligned after finetuning: The crucial role of prompt templates,\" in ICLR 2024 Workshop on Reliable and Responsible Foundation Models, 2024. [Online]. Available: https://openreview.net/forum?id=XlnpQOn95Z", + "[282] P. Hacker, A. Engel, and M. Mauer, \"Regulating chatgpt and other large generative ai models,\" in Proceedings of the 2023 ACM Conference on Fairness, Accountability, and Transparency. Association for Computing Machinery, 2023.", + "[283] M. Kolla, S. Salunkhe, E. Chandrasekharan, and K. Saha, \"Llm-mod: Can large language models assist content moderation?\" in Extended Abstracts of the CHI Conference on Human Factors in Computing Systems. Association for Computing Machinery, 2024.", + "[284] D. Kumar, Y. A. AbuHashem, and Z. Durmeric, \"Watch your language: Investigating content moderation with large language models,\" Proceedings of the International AAAI Conference on Web and Social Media, 2024." + ], + "bbox": [ + 76, + 53, + 491, + 941 + ], + "page_idx": 45 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[285] H. K. Choi, X. Du, and Y. Li, \"Safety-aware finetuning of large language models,\" in Neurips Safe Generative AI Workshop 2024, 2024. [Online]. Available: https://openreview.net/forum?id=SqL94fLSM7", + "[286] H. Ge, Y. Li, Q. Wang, Y. Zhang, and R. Tang, \"When backdoors speak: Understanding llm backdoor attacks through model-generated explanations,\" arXiv preprint arXiv:2411.12701, 2024.", + "[287] B. Yi, T. Huang, S. Chen, T. Li, Z. Liu, Z. Chu, and Y. Li, \"Probe before you talk: Towards black-box defense against backdoor unalignment for large language models,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=EbxYDBhE3S", + "[288] B. Tran, J. Li, and A. Madry, \"Spectral signatures in backdoor attacks,\" in Advances in Neural Information Processing Systems. 
Curran Associates, Inc., 2018.", + "[289] S. Casper, L. Schulze, O. Patel, and D. Hadfield-Menell, \"Defending against unforeseen failure modes with latent adversarial training,\" 2024. [Online]. Available: https://arxiv.org/abs/2403.05030", + "[290] T. Huang, G. Bhattacharya, P. Joshi, J. Kimball, and L. Liu, \"Antidote: Post-fine-tuning safety alignment for large language models against harmful finetuning,\" 2024. [Online]. Available: https://arxiv.org/abs/2408.09600", + "[291] J. Li, \"Detecting instruction fine-tuning attack on language models with influence function,\" arXiv preprint arXiv:2504.09026, 2025.", + "[292] X. Yi, S. Zheng, L. Wang, X. Wang, and L. He, \"A safety realignment framework via subspace-oriented model fusion for large language models,\" Knowledge-Based Systems, 2024.", + "[293] M. Zhu, Y. Weng, L. Yang, Y. Wei, N. Zhang, and Y. Zhang, \"Locking down the finetuned LLMs safety,\" 2025. [Online]. Available: https://openreview.net/forum?id=YGoFl5KKFc", + "[294] D. Wu, X. Lu, Y. Zhao, and B. Qin, \"Separate the wheat from the chaff: A post-hoc approach to safety re-alignment for fine-tuned language models,\" 2025. [Online]. Available: https://arxiv.org/abs/2412.11041", + "[295] Y. Wang, T. Huang, L. Shen, H. Yao, H. Luo, R. Liu, N. Tan, J. Huang, and D. Tao, \"Panacea: Mitigating harmful fine-tuning for large language models via post-fine-tuning perturbation,\" 2025. [Online]. Available: https://arxiv.org/abs/2501.18100", + "[296] Q. Liu, C. Shang, L. Liu, N. Pappas, J. Ma, N. A. John, S. Doss, L. Marquez, M. Ballesteros, and Y. Benajiba, \"Unraveling and mitigating safety alignment degradation of vision-language models,\" 2025. [Online]. Available: https://openreview.net/forum?id=EEWpE9cR27", + "[297] S. Xu, L. Pang, Y. Zhu, H. Shen, and X. Cheng, \"Cross-modal safety mechanism transfer in large vision-language models,\" arXiv preprint arXiv:2410.12662, 2024.", + "[298] S. Li, L. Yao, L. Zhang, and Y. 
Li, \"Safety layers in aligned large language models: The key to llm security,\" arXiv preprint arXiv:2408.17003, 2024.", + "[299] W. Zhao, Z. Li, Y. Li, Y. Zhang, and J. Sun, \"Defending large language models against jailbreak" + ], + "bbox": [ + 506, + 53, + 921, + 943 + ], + "page_idx": 45 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 45 + }, + { + "type": "page_number", + "text": "46", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 45 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "attacks via layer-specific editing,\" 2024. [Online]. Available: https://arxiv.org/abs/2405.18166", + "[300] NIST, \"Artificial intelligence risk management framework: Generative artificial intelligence profile (initial public draft),\" 2024, accessed: 2025-05-29. [Online]. Available: https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.800-1.ipd.pdf", + "[301] X. Qi, B. Wei, N. Carlini, Y. Huang, T. Xie, L. He, M. Jagielski, M. Nasr, P. Mittal, and P. Henderson, \"On Evaluating the Durability of Safeguards for Open-Weight LLMs,\" Dec. 2024.", + "[302] D. Rosati, J. Wehner, K. Williams, L. Bartoszcze, R. Gonzales, C. Maple, S. Majumdar, H. Sajjad, and F. Rudzicz, \"Representation Noising: A Defence Mechanism Against Harmful Finetuning,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, Nov. 2024.", + "[303] R. Tamirisa, B. Bharathi, L. Phan, A. Zhou, A. Gatti, T. Suresh, M. Lin, J. Wang, R. Wang, R. Arel, A. Zou, D. Song, B. Li, D. Hendrycks, and M. Mazeika, \"Tamper-Resistant Safeguards for Open-Weight LLMs,\" Feb. 2025.", + "[304] D. Rosati, J. Wehner, K. Williams, L. Bartoszcze, H. Sajjad, and F. Rudzicz, \"Immunization against harmful fine-tuning attacks,\" in Findings of the Association for Computational Linguistics: EMNLP 2024. Association for Computational Linguistics, 2024.", + "[305] M. Mazeika, L. Phan, X. 
Yin, A. Zou, Z. Wang, N. Mu, E. Sakhaee, N. Li, S. Basart, B. Li et al., \"Harmbench: A standardized evaluation framework for automated red teaming and robust refusal,\" arXiv preprint arXiv:2402.04249, 2024.", + "[306] P. Chao, E. Debenedetti, A. Robey, M. Andriushchenko, F. Croce, V. Sehwag, E. Dobriban, N. Flammarion, G. J. Pappas, F. Tramer et al., \"Jailbreakbench: An open robustness benchmark for jailbreaking large language models,\" arXiv preprint arXiv:2404.01318, 2024.", + "[307] S. Liu, S. Cui, H. Bu, Y. Shang, and X. Zhang, \"Jail-bench: A comprehensive chinese security assessment benchmark for large language models,\" arXiv preprint arXiv:2502.18935, 2025.", + "[308] J. Cui, W.-L. Chiang, I. Stoica, and C.-J. Hsieh, \"Or-bench: An over-refusal benchmark for large language models,\" arXiv preprint arXiv:2405.20947, 2024.", + "[309] T. Xie, X. Qi, Y. Zeng, Y. Huang, U. M. Sehwag, K. Huang, L. He, B. Wei, D. Li, Y. Sheng et al., \"Sorry-bench: Systematically evaluating large language model safety refusal behaviors,\" arXiv preprint arXiv:2406.14598, 2024.", + "[310] L. Zheng, W.-L. Chiang, Y. Sheng, S. Zhuang, Z. Wu, Y. Zhuang, Z. Lin, Z. Li, D. Li, E. Xing et al., \"Judging llm-as-a-judge with mt-bench and chatbot arena,\" Advances in Neural Information Processing Systems, vol. 36, pp. 46595-46623, 2023.", + "[311] Z. Wang, S. Hu, S. Zhao, X. Lin, F. Juefei-Xu, Z. Li, L. Han, H. Subramanyam, L. Chen, J. Chen et al., \"Mllm-as-a-judge for image safety without human labeling,\" arXiv preprint arXiv:2501.00192, 2024.", + "[312] D. Rosati, J. Wehner, K. Williams, L. Bartoszcze, D. Atanasov, R. Gonzales, S. Majumdar, C. Maple," + ], + "bbox": [ + 76, + 53, + 491, + 943 + ], + "page_idx": 46 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "H. Sajjad, and F. Rudzicz, \"Representation noising effectively prevents harmful fine-tuning on llms,\" arXiv e-prints, pp. arXiv-2405, 2024.", + "[313] H. Zhang, J. Huang, K. Mei, Y. Yao, Z. Wang, C. 
Zhan, H. Wang, and Y. Zhang, \"Agent security bench (ASB): Formalizing and benchmarking attacks and defenses in LLM-based agents,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=V4y0CpX4hK", + "[314] T. Yuan, Z. He, L. Dong, Y. Wang, R. Zhao, T. Xia, L. Xu, B. Zhou, F. Li, Z. Zhang et al., \"R-judge: Benchmarking safety risk awareness for llm agents,\" arXiv preprint arXiv:2401.10019, 2024.", + "[315] Z. Zhang, L. Lei, L. Wu, R. Sun, Y. Huang, C. Long, X. Liu, X. Lei, J. Tang, and M. Huang, \"Safetybench: Evaluating the safety of large language models,\" arXiv preprint arXiv:2309.07045, 2023.", + "[316] L. Li, B. Dong, R. Wang, X. Hu, W. Zuo, D. Lin, Y. Qiao, and J. Shao, \"Salad-bench: A hierarchical and comprehensive safety benchmark for large language models,\" arXiv preprint arXiv:2402.05044, 2024.", + "[317] K. Cobbe, V. Kosaraju, M. Bavarian, M. Chen, H. Jun, L. Kaiser, M. Plappert, J. Tworek, J. Hilton, R. Nakano et al., \"Training verifiers to solve math word problems,\" arXiv preprint arXiv:2110.14168, 2021.", + "[318] S.-Y. Miao, C.-C. Liang, and K.-Y. Su, \"A diverse corpus for evaluating and developing english math word problem solvers,\" arXiv preprint arXiv:2106.15772, 2021.", + "[319] E. Glazer, E. Erdil, T. Besiroglu, D. Chicharro, E. Chen, A. Gunning, C. F. Olsson, J.-S. Denain, A. Ho, E. d. O. Santos et al., \"Frontiermath: A benchmark for evaluating advanced mathematical reasoning in ai,\" arXiv preprint arXiv:2411.04872, 2024.", + "[320] M. Chen, J. Tworek, H. Jun, Q. Yuan, H. P. D. O. Pinto, J. Kaplan, H. Edwards, Y. Burda, N. Joseph, G. Brockman et al., \"Evaluating large language models trained on code,\" arXiv preprint arXiv:2107.03374, 2021.", + "[321] C. E. Jimenez, J. Yang, A. Wettig, S. Yao, K. Pei, O. Press, and K. Narasimhan, \"Swe-bench: Can language models resolve real-world github issues?\" arXiv preprint arXiv:2310.06770, 2023.", + "[322] X. 
Zhang, J. Zhao, and Y. LeCun, \"Character-level convolutional networks for text classification,\" Advances in neural information processing systems, vol. 28, 2015.", + "[323] H. Luo, Y. Jin, X. Liu, T. Shang, R. Chen, and Z. Liu, \"Geic: Universal and multilingual named entity recognition with large language models,\" arXiv preprint arXiv:2409.11022, 2024.", + "[324] X. Li, T. Zhang, Y. Dubois, R. Taori, I. Gulrajani, C. Guestrin, P. Liang, and T. B. Hashimoto, \"Alpaca-eval: An automatic evaluator of instruction-following models,\" 2023.", + "[325] W.-L. Chiang, L. Zheng, Y. Sheng, A. N. Angelopoulos, T. Li, D. Li, B. Zhu, H. Zhang, M. Jordan, J. E. Gonzalez et al., \"Chatbot arena: An open platform for evaluating llms by human preference,\" in *Forty-first International Conference on Machine Learning*, 2024.", + "[326] B. Gliwa, I. Mochol, M. Biesek, and A. Wawer, \"Samsum corpus: A human-annotated dialogue" + ], + "bbox": [ + 506, + 53, + 921, + 943 + ], + "page_idx": 46 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 46 + }, + { + "type": "page_number", + "text": "47", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 46 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "dataset for abstractive summarization,\" arXiv preprint arXiv:1911.12237, 2019.", + "[327] M. Macháček and O. Bojar, \"Results of the wmt14 metrics shared task,\" in Proceedings of the Ninth Workshop on Statistical Machine Translation, 2014, pp. 293-301.", + "[328] X. Lu, D. Liu, Y. Yu, L. Xu, and J. Shao, \"X-boundary: Establishing exact safety boundary to shield llms from multi-turn jailbreaks without compromising usability,\" arXiv preprint arXiv:2502.09990, 2025.", + "[329] OpenAI, \"Moderation api,\" https://platform.openai.com/docs/guides/moderation/overview, 2023.", + "[330] H. Inan, K. Upasani, J. Chi, R. Rungta, K. Iyer, Y. Mao, M. Tontchev, Q. Hu, B. 
Fuller, D. Testuggine, and M. Khabsa, \"Llama guard: Llm-based input-output safeguard for human-ai conversations,\" CoRR, 2023.", + "[331] J. Ji, T. Qiu, B. Chen, B. Zhang, H. Lou, K. Wang, Y. Duan, Z. He, J. Zhou, Z. Zhang et al., \"Ai alignment: A comprehensive survey,\" arXiv preprint arXiv:2310.19852, 2023.", + "[332] T. A. Qiu, Y. Zhang, X. Huang, J. Li, J. Ji, and Y. Yang, \"Progressgym: Alignment with a millennium of moral progress,\" Advances in Neural Information Processing Systems, vol. 37, pp. 14570-14607, 2024.", + "[333] B. Wang, W. Chen, H. Pei, C. Xie, M. Kang, C. Zhang, C. Xu, Z. Xiong, R. Dutta, R. Schaeffer et al., \"Decoding trust: A comprehensive assessment of trustworthiness in gpt models.\" in NeurIPS, 2023.", + "[334] S. Gehman, S. Gururangan, M. Sap, Y. Choi, and N. A. Smith, \"Realtoxicityprompts: Evaluating neural toxic degeneration in language models,\" arXiv preprint arXiv:2009.11462, 2020.", + "[335] Y. Wang, H. Li, X. Han, P. Nakov, and T. Baldwin, \"Do-not-answer: A dataset for evaluating safeguards in llms,\" arXiv preprint arXiv:2308.13387, 2023.", + "[336] M. Conover, R. Staats, A. Rane, G. Shani, K. Katz, A. Powell, A. Ross, A. Maas, and A. Zhang, \"Databricks-dolly: Introducing dolly-15k, democratizing the magic of instruction following,\" https://github.com/databrickslabs/dolly, 2023.", + "[337] X. Wu, Y. Hao, K. Sun, Y. Chen, F. Zhu, R. Zhao, and H. Li, \"Human preference score v2: A solid benchmark for evaluating human preferences of text-to-image synthesis,\" arXiv preprint arXiv:2306.09341, 2023.", + "[338] Y. Yan, S. Wang, J. Huo, H. Li, B. Li, J. Su, X. Gao, Y.-F. Zhang, T. Xu, Z. Chu et al., \"Errorradar: Benchmarking complex mathematical reasoning of multimodal large language models via error detection,\" arXiv preprint arXiv:2410.04509, 2024.", + "[339] Q. Jin, B. Dhingra, Z. Liu, W. W. Cohen, and X. 
Lu, \"Pubmedqa: A dataset for biomedical research question answering,\" arXiv preprint arXiv:1909.06146, 2019.", + "[340] K. M. Hermann, T. Kocisky, E. Grefenstette, L. Espeholt, W. Kay, M. Suleyman, and P. Blunsom, \"Teaching machines to read and comprehend,\" Advances in neural information processing systems, vol. 28, 2015.", + "[341] S. Lin, J. Hilton, and O. Evans, \"Truthfulqa: Measuring how models mimic human falsehoods,\" arXiv preprint arXiv:2109.07958, 2021.", + "[342] Y. Mou, S. Zhang, and W. Ye, \"Sg-bench: Evaluating llm safety generalization across diverse tasks and" + ], + "bbox": [ + 76, + 53, + 491, + 943 + ], + "page_idx": 47 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "prompt types,\" Advances in Neural Information Processing Systems, vol. 37, pp. 123032-123054, 2024.", + "[343] F. Jiang, Z. Xu, Y. Li, L. Niu, Z. Xiang, B. Li, B. Y. Lin, and R. Poovendran, \"Safechain: Safety of language models with long chain-of-thought reasoning capabilities,\" arXiv preprint arXiv:2502.12025, 2025.", + "[344] T. Hartvigsen, S. Gabriel, H. Palangi, M. Sap, D. Ray, and E. Kamar, \"Toxigen: A large-scale machine-generated dataset for adversarial and implicit hate speech detection,\" arXiv preprint arXiv:2203.09509, 2022.", + "[345] A. Souly, Q. Lu, D. Bowen, T. Trinh, E. Hsieh, S. Pandey, P. Abbeel, J. Svegliato, S. Emmons, O. Watkins et al., \"A strongreject for empty jailbreaks,\" arXiv preprint arXiv:2402.10260, 2024.", + "[346] L. Jiang, K. Rao, S. Han, A. Ettinger, F. Brahman, S. Kumar, N. Mireshghallah, X. Lu, M. Sap, Y. Choi et al., \"Wildteaming at scale: From in-the-wild jailbreaks to (adversarily) safer language models,\" Advances in Neural Information Processing Systems, vol. 37, pp. 47094-47165, 2024.", + "[347] D. Hendrycks, M. Mazeika, and T. Woodside, \"An overview of catastrophic ai risks,\" arXiv preprint arXiv:2306.12001, 2023.", + "[348] B. Baker, J. Huizinga, L. Gao, Z. Dou, M. Y. Guan, A. Madry, W. Zaremba, J. 
Pachocki, and D. Farhi, \"Monitoring reasoning models for misbehavior and the risks of promoting obfuscation,\" arXiv preprint arXiv:2503.11926, 2025.", + "[349] T. Hagendorff, \"Deception abilities emerged in large language models,\" Proceedings of the National Academy of Sciences, vol. 121, no. 24, p. e2317967121, 2024. [Online]. Available: https://www.pnas.org/doi/abs/10.1073/pnas.2317967121", + "[350] P. S. Park, S. Goldstein, A. O'Gara, M. Chen, and D. Hendrycks, \"Ai deception: A survey of examples, risks, and potential solutions,\" Patterns, vol. 5, no. 5, 2024.", + "[351] OpenAI, \"Gpt-4 technical report,\" ArXiv, vol. abs/2303.08774, 2023.", + "[352] F. Ward, F. Toni, F. Belardinelli, and T. Everitt, \"Honesty is the best policy: defining and mitigating ai deception,\" Advances in neural information processing systems, vol. 36, pp. 2313-2341, 2023.", + "[353] J. Scheurer, M. Balesni, and M. Hobbahn, \"Large language models can strategically deceive their users when put under pressure,\" arXiv preprint arXiv:2311.07590, 2023.", + "[354] S. Chern, Z. Hu, Y. Yang, E. Chern, Y. Guo, J. Jin, B. Wang, and P. Liu, \"Behonest: Benchmarking honesty in large language models,\" arXiv preprint arXiv:2406.13261, 2024.", + "[355] A. O'Gara, \"Hoodwinked: Deception and cooperation in a text-based game for language models,\" arXiv preprint arXiv:2308.01404, 2023.", + "[356] M. F. A. R. D. T. (FAIR)†, A. Bakhtin, N. Brown, E. Dinan, G. Farina, C. Flaherty, D. Fried, A. Goff, J. Gray, H. Hu et al., \"Human-level play in the game of diplomacy by combining language models with strategic reasoning,\" Science, vol. 378, no. 6624, pp. 1067-1074, 2022." + ], + "bbox": [ + 506, + 53, + 921, + 941 + ], + "page_idx": 47 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 47 + }, + { + "type": "page_number", + "text": "48", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 47 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[357] L. Schulz, N. Alon, J. Rosenschein, and P. Dayan, \"Emergent deception and skepticism via theory of mind,\" in First Workshop on Theory of Mind in Communicating Agents, 2023.", + "[358] A. Meinke, B. Schoen, J. Scheurer, M. Balesni, R. Shah, and M. Hobbahn, \"Frontier models are capable of in-context scheming,\" arXiv preprint arXiv:2412.04984, 2024.", + "[359] R. Greenblatt, C. Denison, B. Wright, F. Roger, M. Mac-Diarmid, S. Marks, J. Treutlein, T. Belonax, J. Chen, D. Duvenaud et al., \"Alignment faking in large language models,\" arXiv preprint arXiv:2412.14093, 2024.", + "[360] A. Pan, J. S. Chan, A. Zou, N. Li, S. Basart, T. Woodside, H. Zhang, S. Emmons, and D. Hendrycks, \"Do the rewards justify the means? measuring trade-offs between rewards and ethical behavior in the machiavelli benchmark,\" in International conference on machine learning. PMLR, 2023, pp. 26837-26867.", + "[361] L. Vaugrante, F. Carlon, M. Menke, and T. Hagen-dorff, \"Compromising honesty and harmlessness in language models via deception attacks,\" arXiv preprint arXiv:2502.08301, 2025.", + "[362] J. Ji, K. Wang, T. Qiu, B. Chen, J. Zhou, C. Li, H. Lou, and Y. Yang, \"Language models resist alignment,\" arXiv preprint arXiv:2406.06144, 2024.", + "[363] L. Bürger, F. A. Hamprecht, and B. Nadler, \"Truth is universal: Robust detection of lies in llms,\" Advances in Neural Information Processing Systems, vol. 37, pp. 138-393-138-431, 2024.", + "[364] OpenAI, \"Detecting misbehavior in frontier reasoning models,\" https://openai.com/index/chain-of-thought-monitoring/, Mar. 2025, accessed: 2025-05-14.", + "[365] T. Everitt, V. Krakovna, L. Orseau, M. Hutter, and S. 
Legg, \"Reinforcement learning with a corrupted reward channel,\" arXiv preprint arXiv:1705.08417, 2017.", + "[366] S. Zhuang and D. Hadfield-Menell, \"Consequences of misaligned ai,\" Advances in Neural Information Processing Systems, vol. 33, pp. 15763-15773, 2020.", + "[367] V. Krakovna, J. Uesato, V. Mikulik, M. Rahtz, T. Everitt, R. Kumar, Z. Kenton, J. Leike, and S. Legg, \"Specification gaming: the flip side of ai ingenuity,\" 2020, accessed: 2025-03-30. [Online]. Available: https://deepmind.google/discover/blog/ specification-gaming-the-flip-side-of-ai-ingenuity/", + "[368] D. Amodei, C. Olah, J. Steinhardt, P. Christiano, J. Schulman, and D. Mané, \"Concrete problems in air safety,\" arXiv preprint arXiv:1606.06565, 2016.", + "[369] L. Weng, \"Reward hacking in reinforcement learning,\" 2024, accessed: 2025-03-30. [Online]. Available: https://lilianweng.github.io/posts/2024-11-28-reward-hacking", + "[370] T. Everitt, M. Hutter, R. Kumar, and V. Krakovna, \"Reward tampering problems and solutions in reinforcement learning: A causal influence diagram perspective,\" Synthese, vol. 198, no. Suppl 27, pp. 6435-6467, 2021.", + "[371] J. Skalse, N. Howe, D. Krasheninnikov, and D. Krueger, \"Defining and characterizing reward gaming,\" Advances in Neural Information Processing Systems, vol. 35, pp. 9460-9471, 2022." + ], + "bbox": [ + 76, + 53, + 491, + 943 + ], + "page_idx": 48 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[372] S. Casper, X. Davies, C. Shi, T. K. Gilbert, J. Scheurer, J. Rando, R. Freedman, T. Korbak, D. Lindner, P. Freire et al., \"Open problems and fundamental limitations of reinforcement learning from human feedback,\" arXiv preprint arXiv:2307.15217, 2023.", + "[373] L. Gao, J. Schulman, and J. Hilton, \"Scaling laws for reward model overoptimization,\" in International Conference on Machine Learning. PMLR, 2023, pp. 10835-10866.", + "[374] E. Perez, S. Ringer, K. Lukosiute, K. Nguyen, E. Chen, S. Heiner, C. 
Pettit, C. Olsson, S. Kundu, S. Kadavath et al., \"Discovering language model behaviors with model-written evaluations,\" in Findings of the Association for Computational Linguistics: ACL 2023, 2023, pp. 13387-13434.", + "[375] C. Denison, M. MacDiarmid, F. Berez, D. Duvenaud, S. Kravec, S. Marks, N. Schiefer, R. Soklaski, A. Tamkin, J. Kaplan et al., \"Sycophancy to subterfuge: Investigating reward-tampering in large language models,\" arXiv preprint arXiv:2406.10162, 2024.", + "[376] P. Singhal, T. Goyal, J. Xu, and G. Durrett, \"A long way to go: Investigating length correlations in rlhf,\" arXiv preprint arXiv:2310.03716, 2023.", + "[377] F. Bianchi, M. Suzgun, G. Attanasio, P. Röttger, D. Jurafsky, T. Hashimoto, and J. Zou, \"Safety-tuned llamas: Lessons from improving the safety of large language models that follow instructions,\" arXiv preprint arXiv:2309.07875, 2023.", + "[378] M. Tegmark and S. Omohundro, \"Provably safe systems: the only path to controllable agi,\" arXiv preprint arXiv:2309.01933, 2023.", + "[379] D. Dalrymple, J. Skalse, Y. Bengio, S. Russell, M. Tegmark, S. Seshia, S. Omohundro, C. Szegedy, B. Goldhaber, N. Ammann et al., \"Towards guaranteed safe ai: A framework for ensuring robust and reliable ai systems,\" arXiv preprint arXiv:2405.06624, 2024.", + "[380] A. Caliskan, J. J. Bryson, and A. Narayanan, \"Semantics derived automatically from language corpora contain human-like biases,\" Science, vol. 356, no. 6334, pp. 183-186, 2017.", + "[381] R. Xu, Z. Zhou, T. Zhang, Z. Qi, S. Yao, K. Xu, W. Xu, and H. Qiu, \"Walking in others' shoes: How perspective-taking guides large language models in reducing toxicity and bias,\" arXiv preprint arXiv:2407.15366, 2024.", + "[382] D. Acemoglu and P. Restrepo, \"Artificial intelligence, automation, and work,\" in The economics of artificial intelligence: An agenda. University of Chicago Press, 2018, pp. 197-236.", + "[383] J. Mokander, J. Schuett, H. R. Kirk, and L. 
Floridi, \"Auditing large language models: a three-layered approach,\" AI and Ethics, vol. 4, no. 4, pp. 1085-1115, 2024.", + "[384] M. Anderljung, J. Barnhart, A. Korinek, J. Leung, C. O'Keefe, J. Whittlestone, S. Avin, M. Brundage, J. Bullock, D. Cass-Beggs et al., \"Frontier ai regulation: Managing emerging risks to public safety,\" arXiv preprint arXiv:2307.03718, 2023.", + "[385] A. Mannes, \"Governance, risk, and artificial intelligence,\" *Ai Magazine*, vol. 41, no. 1, pp. 61-69, 2020.", + "[386] L. Koessler and J. Schuett, \"Risk assessment at agi" + ], + "bbox": [ + 506, + 53, + 921, + 943 + ], + "page_idx": 48 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 48 + }, + { + "type": "page_number", + "text": "49", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 48 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "companies: A review of popular risk assessment techniques from other safety-critical industries,\" arXiv preprint arXiv:2307.08823, 2023.", + "[387] J. Schuett, N. Dreksler, M. Anderljung, D. McCaffary, L. Heim, E. Bluemke, and B. Garfinkel, \"Towards best practices in agi safety and governance: A survey of expert opinion,\" arXiv preprint arXiv:2305.07153, 2023.", + "[388] L. Ho, J. Barnhart, R. Trager, Y. Bengio, M. Brundage, A. Carnegie, R. Chowdhury, A. Dafoe, G. Hadfield, M. Levi et al., \"International institutions for advanced ai,\" arXiv preprint arXiv:2307.04699, 2023.", + "[389] M. M. Maas, \"Aligning ai regulation to sociotechnical change,\" in The Oxford Handbook of AI Governance, 2022.", + "[390] M. Kinniment, L. J. K. Sato, H. Du, B. Goodrich, M. Hasin, L. Chan, L. H. Miles, T. R. Lin, H. Wijk, J. Burget et al., \"Evaluating language-model agents on realistic autonomous tasks,\" arXiv preprint arXiv:2312.11671, 2023.", + "[391] J. Tallberg, E. Erman, M. Furendal, J. Geith, M. Klamberg, and M. 
Lundgren, \"The global governance of artificial intelligence: Next steps for empirical and normative research,\" International Studies Review, vol. 25, no. 3, p. viad040, 2023.", + "[392] OECD, \"OECD Principles on Artificial Intelligence,\" https://oecd.ai/en/ai-principles, 2019.", + "[393] UNESCO, \"Recommendation on the Ethics of Artificial Intelligence,\" https://unesdoc.unesco.org/ark:/48223/pf0000381137, 2021.", + "[394] E. Seger, N. Dreksler, R. Moulange, E. Dardaman, J. Schuett, K. Wei, C. Winter, M. Arnold, S. O. hEigeartaigh, A. Korinek et al., \"Open-sourcing highly capable foundation models: An evaluation of risks, benefits, and alternative methods for pursuing open-source objectives,\" arXiv preprint arXiv:2311.09227, 2023.", + "[395] F. Urbina, F. Lentzos, C. Invernizzi, and S. Ekins, \"Dual use of artificial-intelligence-powered drug discovery,\" Nature machine intelligence, vol. 4, no. 3, pp. 189-191, 2022.", + "[396] Meta, \"Meta and Microsoft introduce the next generation of Llama,\" https://ai.meta.com/blog/llama-2, 2023.", + "[397] E. Mostaque, \"Democratizing ai, stable diffusion & generative models,\" https://exchange scale.com/public/videos/emad-mostaque-stability-ai-stable-diffusion-open-sou2022.", + "[398] J. A. Goldstein, G. Sastry, M. Musser, R. DiResta, M. Gentzel, and K. Sedova, \"Generative language models and automated influence operations: Emerging threats and potential mitigations,\" arXiv preprint arXiv:2301.04246, 2023.", + "[399] I. Solaiman, M. Brundage, J. Clark, A. Askell, A. Herbert-Voss, J. Wu, A. Radford, G. Krueger, J. W. Kim, S. Kreps et al., \"Release strategies and the social impacts of language models,\" arXiv preprint arXiv:1908.09203, 2019.", + "[400] P. 
Chavez, \"An ai challenge: Balancing open and closed systems,\" https://cepa.org/article/an-ai-challenge-balancing-open-and-closed-systems," + ], + "bbox": [ + 76, + 53, + 491, + 943 + ], + "page_idx": 49 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "2023.", + "[401] N. Zhang, Y. Yao, B. Tian, P. Wang, S. Deng, M. Wang, Z. Xi, S. Mao, J. Zhang, Y. Ni et al., \"A comprehensive study of knowledge editing for large language models,\" arXiv preprint arXiv:2401.01286, 2024.", + "[402] J. Fang, H. Jiang, K. Wang, Y. Ma, X. Wang, X. He, and T.-s. Chua, \"Alphaedit: Null-space constrained knowledge editing for language models,\" arXiv preprint arXiv:2410.02355, 2024.", + "[403] Z. Zhang, Y. Zhou, X. Zhao, T. Che, and L. Lyu, \"Prompt certified machine unlearning with randomized gradient smoothing and quantization,\" Advances in Neural Information Processing Systems, vol. 35, pp. 13433-13455, 2022.", + "[404] T. Che, Y. Zhou, Z. Zhang, L. Lyu, J. Liu, D. Yan, D. Dou, and J. Huan, \"Fast federated machine unlearning with nonlinear functional theory,\" in International conference on machine learning. PMLR, 2023, pp. 4241-4268.", + "[405] W. Wang, Z. Tian, C. Zhang, and S. Yu, \"Machine unlearning: A comprehensive survey,\" arXiv preprint arXiv:2405.07406, 2024.", + "[406] S. Liu, Y. Yao, J. Jia, S. Casper, N. Baracaldo, P. Hase, Y. Yao, C. Y. Liu, X. Xu, H. Li et al., \"Rethinking machine unlearning for large language models,\" Nature Machine Intelligence, pp. 1-14, 2025.", + "[407] Y. Yao, X. Xu, and Y. Liu, \"Large language model unlearning,\" Advances in Neural Information Processing Systems, vol. 37, pp. 105-425-105-475, 2025.", + "[408] C. Ding, J. Wu, Y. Yuan, J. Lu, K. Zhang, A. Su, X. Wang, and X. He, \"Unified parameter-efficient unlearning for llms,\" arXiv preprint arXiv:2412.00383, 2024.", + "[409] Z. Li, H. Jiang, H. Chen, B. Bi, Z. Zhou, F. Sun, J. Fang, and X. 
Wang, \"Reinforced lifelong editing for language models,\" arXiv preprint arXiv:2502.05759, 2025.", + "[410] E. Mitchell, C. Lin, A. Bosselut, C. Finn, and C. D. Manning, \"Fast model editing at scale,\" arXiv preprint arXiv:2110.11309, 2021.", + "[411] N. De Cao, W. Aziz, and I. Titov, \"Editing factual knowledge in language models,\" arXiv preprint arXiv:2104.08164, 2021.", + "[412] P. Wang, Z. Li, N. Zhang, Z. Xu, Y. Yao, Y. Jiang, P. Xie, F. Huang, and H. Chen, \"Wise: Rethinking the knowledge memory for lifelong model editing of large language models,\" arXiv preprint arXiv:2405.14768, 2024.", + "[413] T. Hartvigsen, S. Sankaranarayanan, H. Palangi, Y. Kim, and M. Ghassemi, \"Aging with grace: Lifelong model editing with discrete key-value adaptors,\" Advances in Neural Information Processing Systems, vol. 36, 2024.", + "[414] H. Jiang, J. Fang, N. Zhang, G. Ma, M. Wan, X. Wang, X. He, and T.-s. Chua, \"Anyedit: Edit any knowledge encoded in language models,\" arXiv preprint arXiv:2502.05628, 2025.", + "[415] H. Jiang, J. Fang, T. Zhang, A. Zhang, R. Wang, T. Liang, and X. Wang, \"Neuron-level sequential editing for large language models,\" arXiv preprint arXiv:2410.04045, 2024.", + "[416] K. Meng, D. Bau, A. Andonian, and Y. Belinkov," + ], + "bbox": [ + 508, + 54, + 921, + 943 + ], + "page_idx": 49 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 49 + }, + { + "type": "page_number", + "text": "50", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 49 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "\"Locating and editing factual associations in gpt,\" Advances in Neural Information Processing Systems, vol. 35, pp. 17359-17372, 2022.", + "[417] A. Prasad, P. Hase, X. Zhou, and M. 
Bansal, \"Grips: Gradient-free, edit-based instruction search for prompting large language models,\" arXiv preprint arXiv:2203.07281, 2022.", + "[418] G. Gangadhar and K. Stratos, \"Model editing by standard fine-tuning,\" arXiv preprint arXiv:2402.11078, 2024.", + "[419] E. Mitchell, C. Lin, A. Bosselut, C. D. Manning, and C. Finn, \"Memory-based model editing at scale,\" in International Conference on Machine Learning. PMLR, 2022, pp. 15817-15831.", + "[420] Y. Yao, P. Wang, B. Tian, S. Cheng, Z. Li, S. Deng, H. Chen, and N. Zhang, \"Editing large language models: Problems, methods, and opportunities,\" arXiv preprint arXiv:2305.13172, 2023.", + "[421] K. Meng, A. S. Sharma, A. Andonian, Y. Belinkov, and D. Bau, \"Mass-editing memory in a transformer,\" arXiv preprint arXiv:2210.07229, 2022.", + "[422] J.-C. Gu, H.-X. Xu, J.-Y. Ma, P. Lu, Z.-H. Ling, K.-W. Chang, and N. Peng, \"Model editing can hurt general abilities of large language models,\" arXiv e-prints, pp. arXiv-2401, 2024.", + "[423] X. Li, S. Li, S. Song, J. Yang, J. Ma, and J. Yu, \"Pmet: Precise model editing in a transformer,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 17, 2024, pp. 18564-18572.", + "[424] M. Zhang, X. Ye, Q. Liu, P. Ren, S. Wu, and Z. Chen, \"Knowledge graph enhanced large language model editing,\" arXiv preprint arXiv:2402.13593, 2024.", + "[425] C. Chen, B. Huang, Z. Li, Z. Chen, S. Lai, X. Xu, J.-C. Gu, J. Gu, H. Yao, C. Xiao et al., \"Can editing llms inject harm?\" arXiv preprint arXiv:2407.20224, 2024.", + "[426] M. Wang, N. Zhang, Z. Xu, Z. Xi, S. Deng, Y. Yao, Q. Zhang, L. Yang, J. Wang, and H. Chen, \"Detoxifying large language models via knowledge editing,\" arXiv preprint arXiv:2403.14472, 2024.", + "[427] C. Zheng, L. Li, Q. Dong, Y. Fan, Z. Wu, J. Xu, and B. Chang, \"Can we edit factual knowledge by in-context learning?\" arXiv preprint arXiv:2305.12740, 2023.", + "[428] Y. Li, T. Li, K. Chen, J. Zhang, S. Liu, W. Wang, T. 
Zhang, and Y. Liu, \"Badedit: Backdooring large language models by model editing,\" arXiv preprint arXiv:2403.13355, 2024.", + "[429] K. Grimes, M. Christiani, D. Shriver, and M. Connor, \"Concept-rot: Poisoning concepts in large language models with model editing,\" arXiv preprint arXiv:2412.13341, 2024.", + "[430] X. Wu, J. Li, M. Xu, W. Dong, S. Wu, C. Bian, and D. Xiong, \"Depn: Detecting and editing privacy neurons in pretrained language models,\" arXiv preprint arXiv:2310.20138, 2023.", + "[431] X. Li, Z. Li, Y. Kosuga, Y. Yoshida, and V. Bian, \"Precision knowledge editing: Enhancing safety in large language models,\" arXiv preprint arXiv:2410.03772, 2024.", + "[432] X. Hu, D. Li, B. Hu, Z. Zheng, Z. Liu, and M. Zhang, \"Separate the wheat from the chaff: Model deficiency unlearning via parameter-efficient module op" + ], + "bbox": [ + 76, + 53, + 491, + 943 + ], + "page_idx": 50 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "eration,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 16, 2024, pp. 18252-18260.", + "[433] T. Yang, L. Dai, Z. Liu, X. Wang, M. Jiang, Y. Tian, and X. Zhang, \"Cliperase: Efficient unlearning of visual-textual associations in clip,\" arXiv preprint arXiv:2410.23330, 2024.", + "[434] R. Gandikota, J. Materzynska, J. Fiotto-Kaufman, and D. Bau, \"Erasing concepts from diffusion models,\" 2023 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 2426-2436, 2023.", + "[435] E. Zhang, K. Wang, X. Xu, Z. Wang, and H. Shi, \"Forget-me-not: Learning to forget in text-to-image diffusion models,\" 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pp. 1755-1764, 2023.", + "[436] C. Fan, J. Liu, Y. Zhang, D. Wei, E. Wong, and S. Liu, \"Salun: Empowering machine unlearning via gradient-based weight saliency in both image classification and generation,\" ArXiv, vol. abs/2310.12508, 2023.", + "[437] Z. Huang, X. Cheng, J. Zheng, H. Wang, Z. 
He, T. Li, and X. Huang, \"Unified gradient-based machine unlearning with remain geometry enhancement,\" ArXiv, vol. abs/2409.19732, 2024.", + "[438] A. Blanco-Justicia, J. Domingo-Ferrer, N. M. Jebreel, B. Manzanares-Salor, and D. Sánchez, \"Unlearning in large language models: We are not there yet,\" Computer, vol. 58, no. 1, pp. 97-100, 2025.", + "[439] S. Dai, C. Xu, S. Xu, L. Pang, Z. Dong, and J. Xu, \"Bias and unfairness in information retrieval systems: New challenges in the llm era,\" in Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, 2024, pp. 6437-6447.", + "[440] G. Nicolas and A. Caliskan, \"A taxonomy of stereotype content in large language models,\" arXiv preprint arXiv:2408.00162, 2024.", + "[441] S. Wang, R. Li, X. Chen, Y. Yuan, D. F. Wong, and M. Yang, \"Exploring the impact of personality traits on llm bias and toxicity,\" arXiv preprint arXiv:2502.12566, 2025.", + "[442] A. Liu, Q. Sheng, and X. Hu, \"Preventing and detecting misinformation generated by large language models,\" in Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval, 2024, pp. 3001-3004.", + "[443] Q. Zhang, H. Qiu, D. Wang, H. Qian, Y. Li, T. Zhang, and M. Huang, \"Understanding the dark side of lms' intrinsic self-correction,\" arXiv preprint arXiv:2412.14959, 2024.", + "[444] R. Xu, B. Lin, S. Yang, T. Zhang, W. Shi, T. Zhang, Z. Fang, W. Xu, and H. Qiu, \"The earth is flat because...: Investigating llms' belief towards misinformation via persuasive conversation,\" in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2024, pp. 16259-16303.", + "[445] Z. Liu, G. Dou, Z. Tan, Y. Tian, and M. Jiang, \"Machine unlearning in generative ai: A survey,\" arXiv preprint arXiv:2407.20516, 2024.", + "[446] Y. Qu, M. Ding, N. Sun, K. Thilakarathna, T. Zhu, and D. 
Niyato, \"The frontier of data erasure: Machine" + ], + "bbox": [ + 506, + 53, + 921, + 941 + ], + "page_idx": 50 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 50 + }, + { + "type": "page_number", + "text": "51", + "bbox": [ + 906, + 32, + 919, + 42 + ], + "page_idx": 50 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "unlearning for large language models,\" arXiv preprint arXiv:2403.15779, 2024.", + "[447] A. Blanco-Justicia, N. Jebreel, B. Manzanares-Salor, D. Sánchez, J. Domingo-Ferrer, G. Collell, and K. Eeik Tan, \"Digital forgetting in large language models: A survey of unlearning methods,\" Artificial Intelligence Review, vol. 58, no. 3, p. 90, 2025.", + "[448] N. Li, C. Zhou, Y. Gao, H. Chen, Z. Zhang, B. Kuang, and A. Fu, \"Machine unlearning: Taxonomy, metrics, applications, challenges, and prospects,\" IEEE Transactions on Neural Networks and Learning Systems, 2025.", + "[449] C. Gao, L. Wang, C. Weng, X. Wang, and Q. Zhu, \"Practical unlearning for large language models,\" arXiv preprint arXiv:2407.10223, 2024.", + "[450] P. Thaker, S. Hu, N. Kale, Y. Maurya, Z. S. Wu, and V. Smith, \"Position: Llm unlearning benchmarks are weak measures of progress,\" arXiv preprint arXiv:2410.02879, 2024.", + "[451] K. Zhao, M. Kurmanji, G.-O. Bärbulescu, E. Triantafillou, and P. Triantafillou, \"What makes unlearning hard and what to do about it,\" Advances in Neural Information Processing Systems, vol. 37, pp. 12293-12333, 2025.", + "[452] W. Wang, M. Zhang, X. Ye, Z. Ren, Z. Chen, and P. Ren, \"Uipe: Enhancing llm unlearning by removing knowledge related to forgetting targets,\" arXiv preprint arXiv:2503.04693, 2025.", + "[453] H. Wang, Y. Jing, H. Sun, Y. Wang, J. Wang, J. Liao, and D. 
Tao, \"Erasing without remembering: Safeguarding knowledge forgetting in large language models,\" arXiv preprint arXiv:2502.19982, 2025.", + "[454] T. Tran, R. Liu, and L. Xiong, \"Tokens for learning, tokens for unlearning: Mitigating membership inference attacks in large language models via dual-purpose training,\" arXiv preprint arXiv:2502.19726, 2025.", + "[455] H. Xu, N. Zhao, L. Yang, S. Zhao, S. Deng, M. Wang, B. Hooi, N. Oo, H. Chen, and N. Zhang, \"Relearn: Unlearning via learning for large language models,\" arXiv preprint arXiv:2502.11190, 2025.", + "[456] Q. Zhang, H. Qiu, D. Wang, Y. Li, T. Zhang, W. Zhu, H. Weng, L. Yan, and C. Zhang, \"Large scale knowledge washing,\" in The Thirteenth International Conference on Learning Representations, 2025.", + "[457] A. Thudi, H. Jia, I. Shumailov, and N. Papernot, \"On the necessity of auditable algorithmic definitions for machine unlearning,\" in 31st USENIX security symposium (USENIX Security 22), 2022, pp. 4007-4022.", + "[458] S. Goel, A. Prabhu, P. Torr, P. Kumaraguru, and A. Sanyal, \"Corrective machine unlearning,\" Transactions on Machine Learning Research.", + "[459] A. Thudi, G. Deza, V. Chandrasekaran, and N. Papernot, \"Unrolling sgd: Understanding factors influencing machine unlearning,\" in 2022 IEEE 7th European Symposium on Security and Privacy (EuroS&P). IEEE, 2022, pp. 303-319.", + "[460] B. Liu, Q. Liu, and P. Stone, \"Continual learning and private unlearning,\" in Conference on Lifelong Learning Agents. PMLR, 2022, pp. 243-254.", + "[461] Q. P. Nguyen, B. K. H. Low, and P. Jaillet, \"Variational bayesian unlearning,\" Advances in Neural Information Processing Systems, vol. 33, pp. 16025-16036, 2020." + ], + "bbox": [ + 76, + 53, + 491, + 941 + ], + "page_idx": 51 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[462] L. Wang, T. Chen, W. Yuan, X. Zeng, K.-F. Wong, and H. 
Yin, \"Kga: A general machine unlearning framework based on knowledge gap alignment,\" arXiv preprint arXiv:2305.06535, 2023.", + "[463] Y. Liu, Y. Zhang, T. Jaakkola, and S. Chang, \"Revisiting who's harry potter: Towards targeted unlearning from a causal intervention perspective,\" arXiv preprint arXiv:2407.16997, 2024.", + "[464] P. Maini, Z. Feng, A. Schwarzschild, Z. C. Lipton, and J. Z. Kolter, \"Tofu: A task of fictitious unlearning for llms,\" arXiv preprint arXiv:2401.06121, 2024.", + "[465] R. Zhang, L. Lin, Y. Bai, and S. Mei, \"Negative preference optimization: From catastrophic collapse to effective unlearning,\" arXiv preprint arXiv:2404.05868, 2024.", + "[466] R. Rafailov, A. Sharma, E. Mitchell, C. D. Manning, S. Ermon, and C. Finn, \"Direct preference optimization: Your language model is secretly a reward model,\" Advances in Neural Information Processing Systems, vol. 36, 2024.", + "[467] J. Huo, Y. Yan, X. Zheng, Y. Lyu, X. Zou, Z. Wei, and X. Hu, \"Mmunlearner: Reformulating multimodal machine unlearning in the era of multimodal large language models,\" arXiv preprint arXiv:2502.11051, 2025.", + "[468] J. Li, Q. Wei, C. Zhang, G. Qi, M. Du, Y. Chen, and S. Bi, \"Single image unlearning: Efficient machine unlearning in multimodal large language models,\" arXiv preprint arXiv:2405.12523, 2024.", + "[469] S. Xing, F. Zhao, Z. Wu, T. An, W. Chen, C. Li, J. Zhang, and X. Dai, \"Efuf: Efficient fine-grained unlearning framework for mitigating hallucinations in multimodal large language models,\" ArXiv, vol. abs/2402.09801, 2024.", + "[470] T. Chakraborty, E. Shayegani, Z. Cai, N. B. Abu-Ghazaleh, M. S. Asif, Y. Dong, A. K. Roy-Chowdhury, and C. Song, \"Cross-modal safety alignment: Is textual unlearning all you need?\" ArXiv, vol. abs/2406.02575, 2024.", + "[471] J. Chen, Z. Deng, K. Zheng, Y. Yan, S. Liu, P. Wu, P. Jiang, J. Liu, and X. 
Hu, \"Safeeraser: Enhancing safety in multimodal large language models through multimodal machine unlearning,\" arXiv preprint arXiv:2502.12520, 2025.", + "[472] G. Ilharco, M. T. Ribeiro, M. Wortsman, S. Gururangan, L. Schmidt, H. Hajishirzi, and A. Farhadi, \"Editing models with task arithmetic,\" arXiv preprint arXiv:2212.04089, 2022.", + "[473] D. Jung, J. Seo, J. Lee, C. Park, and H. Lim, \"Come: An unlearning-based approach to conflict-free model editing,\" arXiv preprint arXiv:2502.15826, 2025.", + "[474] B. Zhang, Z. Chen, Z. Zheng, J. Li, and H. Chen, \"Resolving editing-unlearning conflicts: A knowledge codebook framework for large language model updating,\" arXiv preprint arXiv:2502.00158, 2025.", + "[475] R. Eldan and M. Russinovich, \"Who's harry potter? approximate unlearning in llms,\" arXiv preprint arXiv:2310.02238, 2023.", + "[476] N. Li, A. Pan, A. Gopal, S. Yue, D. Berrios, A. Gatti, J. D. Li, A.-K. Dombrowski, S. Goel, L. Phan et al., \"The wmdp benchmark: Measuring and re" + ], + "bbox": [ + 506, + 53, + 921, + 941 + ], + "page_idx": 51 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 51 + }, + { + "type": "page_number", + "text": "52", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 51 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ducing malicious use with unlearning,\" arXiv preprint arXiv:2403.03218, 2024.", + "[477] M. Pawelczyk, S. Neel, and H. Lakkaraju, \"In-context unlearning: Language models as few shot unlearners,\" arXiv preprint arXiv:2310.07579, 2023.", + "[478] P. Thaker, Y. Maurya, S. Hu, Z. S. Wu, and V. Smith, \"Guardrail baselines for unlearning in llms,\" arXiv preprint arXiv:2403.03329, 2024.", + "[479] J. Ren, Z. Dai, X. Tang, H. Liu, J. Zeng, Z. Li, R. Goutam, S. Wang, Y. Xing, and Q. 
He, \"A general framework to enhance fine-tuning-based llm unlearning,\" arXiv preprint arXiv:2502.17823, 2025.", + "[480] X. Zhao, W. Cai, T. Shi, D. Huang, L. Lin, S. Mei, and D. Song, \"Improving llm safety alignment with dual-objective optimization,\" arXiv preprint arXiv:2503.03710, 2025.", + "[481] S. Takashiro, T. Kojima, A. Gambardella, Q. Cao, Y. Iwasawa, and Y. Matsuo, \"Answer when needed, forget when not: Language models pretend to forget via in-context knowledge unlearning,\" arXiv preprint arXiv:2410.00382, 2024.", + "[482] A. Muresanu, A. Thudi, M. R. Zhang, and N. Papernot, \"Unlearnable algorithms for in-context learning,\" arXiv preprint arXiv:2402.00751, 2024.", + "[483] Y. Zhou, X. Li, Q. Wang, and J. Shen, \"Visual in-context learning for large vision-language models,\" arXiv preprint arXiv:2402.11574, 2024.", + "[484] Z. Liu, G. Dou, X. Yuan, C. Zhang, Z. Tan, and M. Jiang, \"Modality-aware neuron pruning for unlearning in multimodal large language models,\" arXiv preprint arXiv:2502.15910, 2025.", + "[485] N. Yang, M. Kim, S. Yoon, J. Shin, and K. Jung, \"Faithun: Toward faithful forgetting in language models by investigating the interconnectedness of knowledge,\" arXiv preprint arXiv:2502.19207, 2025.", + "[486] A. Ramakrishna, Y. Wan, X. Jin, K.-W. Chang, Z. Bu, B. Vinzamuri, V. Cevher, M. Hong, and R. Gupta, \"Lume: Llm unlearning with multitask evaluations,\" arXiv preprint arXiv:2502.15097, 2025.", + "[487] Y. Lang, K. Guo, Y. Huang, Y. Zhou, H. Zhuang, T. Yang, Y. Su, and X. Zhang, \"Beyond single-value metrics: Evaluating and enhancing llm unlearning with cognitive diagnosis,\" arXiv preprint arXiv:2502.13996, 2025.", + "[488] Q. Wang, J. P. Zhou, Z. Zhou, S. Shin, B. Han, and K. Q. Weinberger, \"Rethinking llm unlearning objectives: A gradient perspective and go beyond,\" arXiv preprint arXiv:2502.19301, 2025.", + "[489] M. Khoriaty, A. Shportko, G. Mercier, and Z. Wood-Doughty, \"Don't forget it! 
conditional sparse autoencoder clamping works for unlearning,\" arXiv preprint arXiv:2503.11127, 2025.", + "[490] J. Cheng and H. Amiri, \"Mu-bench: A multitask multimodal benchmark for machine unlearning,\" arXiv preprint arXiv:2406.14796, 2024.", + "[491] V. Patil, Y.-L. Sung, P. Hase, J. Peng, T. Chen, and M. Bansal, \"Unlearning sensitive information in multimodal llms: Benchmark and attack-defense evaluation,\" Transactions on Machine Learning Research.", + "[492] Y. Ma, J. Wang, F. Wang, S. Ma, J. Li, X. Li, F. Huang, L. Sun, B. Li, Y. Choi et al., \"Benchmarking vision lan" + ], + "bbox": [ + 76, + 53, + 491, + 941 + ], + "page_idx": 52 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "guage model unlearning via fictitious facial identity dataset,\" arXiv preprint arXiv:2411.03554, 2024.", + "[493] S. Moon, M. Lee, S. Park, and D. Kim, “Holistic unlearning benchmark: A multi-faceted evaluation for text-to-image diffusion model unlearning,” arXiv preprint arXiv:2410.05664, 2024.", + "[494] D. Sanyal and M. Mandal, \"Alu: Agentic llm unlearning,\" arXiv preprint arXiv:2502.00406, 2025.", + "[495] J. Cheng and H. Amiri, \"Tool unlearning for tool-augmented llms,\" arXiv preprint arXiv:2502.01083, 2025.", + "[496] H. Liu, P. Xiong, T. Zhu, and S. Y. Philip, \"A survey on machine unlearning: Techniques and new emerged privacy risks,\" Journal of Information Security and Applications, vol. 90, p. 104010, 2025.", + "[497] S. Qureshi, T. Shaik, X. Tao, H. Xie, L. Li, J. Yong, and X. Jia, \"Exploring incremental unlearning: Techniques, challenges, and future directions,\" arXiv preprint arXiv:2502.16708, 2025.", + "[498] J. Geng, Q. Li, H. Woisetschlaeger, Z. Chen, Y. Wang, P. Nakov, H.-A. Jacobsen, and F. Karray, \"A comprehensive survey of machine unlearning techniques for large language models,\" arXiv preprint arXiv:2503.01854, 2025.", + "[499] X. He, C. Chen, L. Lyu, and Q. 
Xu, \"Extracted bert model leaks more information than you think!\" in Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, EMNLP 2022. Association for Computational Linguistics, 2022, pp. 1530-1537.", + "[500] X. He, Q. Xu, L. Lyu, F. Wu, and C. Wang, \"Protecting intellectual property of language generation apis with lexical watermark,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 36, no. 10, 2022, pp. 10758-10766.", + "[501] X. He, Q. Xu, Y. Zeng, L. Lyu, F. Wu, J. Li, and R. Jia, \"Cater: Intellectual property protection on text generation apis via conditional watermarks,\" Advances in Neural Information Processing Systems, vol. 35, pp. 5431-5445, 2022.", + "[502] W. Peng, J. Yi, F. Wu, S. Wu, B. B. Zhu, L. Lyu, B. Jiao, T. Xu, G. Sun, and X. Xie, \"Are you copying my model? protecting the copyright of large language models for eaas via backdoor watermark,\" in Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2023, pp. 7653-7668.", + "[503] N. Carlini, D. Paleka, K. D. Dvijotham, T. Steinke, J. Hayase, A. F. Cooper, K. Lee, M. Jagielski, M. Nasr, A. Conmy et al., \"Stealing part of a production language model,\" arXiv preprint arXiv:2403.06634, 2024.", + "[504] M. Finlayson, X. Ren, and S. Swayamdipta, \"Logits of api-protected llms leak proprietary information,\" arXiv preprint arXiv:2403.09539, 2024.", + "[505] S. Zanella-Beguelin, S. Tople, A. Paverd, and B. Köpf, \"Grey-box extraction of natural language models,\" in International Conference on Machine Learning. PMLR, 2021, pp. 12278-12286.", + "[506] E. Horwitz, J. Kahana, and Y. Hoshen, \"Recovering the pre-fine-tuning weights of generative models,\" arXiv preprint arXiv:2402.10208, 2024." + ], + "bbox": [ + 506, + 53, + 921, + 941 + ], + "page_idx": 52 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 52 + }, + { + "type": "page_number", + "text": "53", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 52 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[507] Z. Li, C. Wang, P. Ma, C. Liu, S. Wang, D. Wu, C. Gao, and Y. Liu, \"On extracting specialized code abilities from large language models: A feasibility study,\" in Proceedings of the IEEE/ACM 46th International Conference on Software Engineering, 2024, pp. 1-13.", + "[508] A. Liu and A. Moitra, \"Model stealing for any low-rank language model,\" arXiv preprint arXiv:2411.07536, 2024.", + "[509] W. Shi, A. Ajith, M. Xia, Y. Huang, D. Liu, T. Blevins, D. Chen, and L. Zettlemoyer, \"Detecting pretraining data from large language models,\" arXiv preprint arXiv:2310.16789, 2023.", + "[510] J. Zhang, J. Sun, E. Yeats, Y. Ouyang, M. Kuo, J. Zhang, H. F. Yang, and H. Li, \"Min- $k\\%$ ++: Improved baseline for detecting pre-training data from large language models,\" arXiv preprint arXiv:2404.02936, 2024.", + "[511] D. Das, J. Zhang, and F. Tramér, \"Blind baselines beat membership inference attacks for foundation models,\" arXiv preprint arXiv:2406.16201, 2024.", + "[512] P. Maini, H. Jia, N. Papernot, and A. Dziedzic, \"Llm dataset inference: Did you train on my dataset?\" Advances in Neural Information Processing Systems, vol. 37, pp. 124069-124092, 2024.", + "[513] A. V. Duarte, X. Zhao, A. L. Oliveira, and L. Li, \"De-cop: Detecting copyrighted content in language models training data,\" arXiv preprint arXiv:2402.09910, 2024.", + "[514] R. Xie, J. Wang, R. Huang, M. Zhang, R. Ge, J. Pei, N. Z. Gong, and B. Dhingra, \"Recall: Membership inference via relative conditional log-likelihoods,\" arXiv preprint arXiv:2406.15968, 2024.", + "[515] F. Galli, L. Melis, and T. Cucinotta, \"Noisy neighbors: Efficient membership inference attacks against llms,\" arXiv preprint arXiv:2406.16565, 2024.", + "[516] H. 
Mozaffari and V. J. Marathe, \"Semantic membership inference attack against large language models,\" arXiv preprint arXiv:2406.10218, 2024.", + "[517] M. Meeus, S. Jain, M. Rei, and Y.-A. de Montjoye, \"Did the neurons read your book? document-level membership inference for large language models,\" in 33rd USENIX Security Symposium (USENIX Security 24), 2024, pp. 2369-2385.", + "[518] M. Meeus, I. Shilov, M. Faysse, and Y.-A. De Montjoye, \"Copyright traps for large language models,\" arXiv preprint arXiv:2402.09363, 2024.", + "[519] H. Puerto, M. Gubri, S. Yun, and S. J. Oh, \"Scaling up membership inference: When and how attacks succeed on large language models,\" arXiv preprint arXiv:2411.00154, 2024.", + "[520] M. Anderson, G. Amit, and A. Goldsteen, “Is my data in your retrieval database? membership inference attacks against retrieval augmented generation,” arXiv preprint arXiv:2405.20446, 2024.", + "[521] Y. Li, G. Liu, C. Wang, and Y. Yang, \"Generating is believing: Membership inference attacks against retrieval-augmented generation,\" arXiv preprint arXiv:2406.19234, 2024.", + "[522] R. Wen, Z. Li, M. Backes, and Y. Zhang, \"Membership inference attacks against in-context learning,\" in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 3481-" + ], + "bbox": [ + 76, + 53, + 491, + 941 + ], + "page_idx": 53 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "3495.", + "[523] H. Duan, A. Dziedzic, M. Yaghini, N. Papernot, and F. Boenisch, \"On the privacy risk of in-context learning,\" arXiv preprint arXiv:2411.10512, 2024.", + "[524] Y. Wen, L. Marchyok, S. Hong, J. Geiping, T. Goldstein, and N. Carlini, \"Privacy backdoors: Enhancing membership inference through poisoning pre-trained models,\" arXiv preprint arXiv:2404.01231, 2024.", + "[525] R. Wen, T. Wang, M. Backes, Y. Zhang, and A. 
Salem, \"Last one standing: A comparative analysis of security and privacy of soft prompt tuning, lora, and in-context learning,\" arXiv preprint arXiv:2310.11397, 2023.", + "[526] S. Balloccu, P. Schmidtová, M. Lango, and O. Dusek, \"Leak, cheat, repeat: Data contamination and evaluation malpractices in closed-source llms,\" arXiv preprint arXiv:2402.03927, 2024.", + "[527] W. Fu, H. Wang, C. Gao, G. Liu, Y. Li, and T. Jiang, \"Membership inference attacks against fine-tuned large language models via self-prompt calibration,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024.", + "[528] H. Li, G. Deng, Y. Liu, K. Wang, Y. Li, T. Zhang, Y. Liu, G. Xu, G. Xu, and H. Wang, \"Digger: Detecting copyright content mis-usage in large language model training,\" arXiv preprint arXiv:2401.00676, 2024.", + "[529] A. Naseh and N. Mireshghallah, \"Synthetic data can mislead evaluations: Membership inference as machine text detection,\" arXiv preprint arXiv:2501.11786, 2025.", + "[530] Z. Liao and H. Sun, \"Amplegcg: Learning a universal and transferable generative model of adversarial suffixes for jailbreaking both open and closed llms,\" arXiv preprint arXiv:2404.07921, 2024.", + "[531] X. Jia, T. Pang, C. Du, Y. Huang, J. Gu, Y. Liu, X. Cao, and M. Lin, \"Improved techniques for optimization-based jailbreaking on large language models,\" arXiv preprint arXiv:2405.21018, 2024.", + "[532] Y. Zhang and Z. Wei, \"Boosting jailbreak attack with momentum,\" in ICASSP 2025-2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2025, pp. 1-5.", + "[533] Y. Zhao, W. Zheng, T. Cai, D. Xuan Long, K. Kawaguchi, A. Goyal, and M. Q. Shieh, \"Accelerating greedy coordinate gradient and general prompt optimization via probe sampling,\" Advances in Neural Information Processing Systems, vol. 37, pp. 53710-53731, 2024.", + "[534] X. Liu, N. Xu, M. Chen, and C. 
Xiao, \"Autodan: Generating stealthy jailbreak prompts on aligned large language models,\" arXiv preprint arXiv:2310.04451, 2023.", + "[535] S. Zhu, R. Zhang, B. An, G. Wu, J. Barrow, Z. Wang, F. Huang, A. Nenkova, and T. Sun, \"Autodan: interpretable gradient-based adversarial attacks on large language models,\" arXiv preprint arXiv:2310.15140, 2023.", + "[536] A. Mehrotra, M. Zampetakis, P. Kassianik, B. Nelson, H. Anderson, Y. Singer, and A. Karbasi, \"Tree of attacks: Jailbreaking black-box llms automatically,\" Advances in Neural Information Processing Systems, vol. 37, pp. 61-65, 2024.", + "[537] C. Sitawarin, N. Mu, D. Wagner, and A. Araujo," + ], + "bbox": [ + 506, + 54, + 921, + 941 + ], + "page_idx": 53 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 53 + }, + { + "type": "page_number", + "text": "54", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 53 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "\"Pal: Proxy-guided black-box attack on large language models,\" arXiv preprint arXiv:2402.09674, 2024.", + "[538] G. Deng, Y. Liu, Y. Li, K. Wang, Y. Zhang, Z. Li, H. Wang, T. Zhang, and Y. Liu, \"Masterkey: Automated jailbreak across multiple large language model chatbots,\" arXiv preprint arXiv:2307.08715, 2023.", + "[539] X. Liu, P. Li, E. Suh, Y. Vorobeychik, Z. Mao, S. Jha, P. McDaniel, H. Sun, B. Li, and C. Xiao, \"Autodanturbo: A lifelong agent for strategy self-exploration to jailbreak llms,\" arXiv preprint arXiv:2410.05295, 2024.", + "[540] Y. Liu, X. He, M. Xiong, J. Fu, S. Deng, and B. Hooi, \"Flipattack: Jailbreak llms via flipping,\" arXiv preprint arXiv:2410.02832, 2024.", + "[541] T. Wu, Z. Xue, Y. Liu, J. Zhang, B. Hooi, and S.-K. Ng, \"Geneshift: Impact of different scenario shift on jailbreaking llm,\" 2025. [Online]. Available: https://arxiv.org/abs/2504.08104", + "[542] F. Perez and I. 
Ribeiro, \"Ignore previous prompt: Attack techniques for language models,\" arXiv preprint arXiv:2211.09527, 2022.", + "[543] K. Greshake, S. Abdelnabi, S. Mishra, C. Endres, T. Holz, and M. Fritz, \"Not what you've signed up for: Compromising real-world llm-integrated applications with indirect prompt injection,\" in Proceedings of the 16th ACM Workshop on Artificial Intelligence and Security, 2023, pp. 79-90.", + "[544] Y. Liu, G. Deng, Y. Li, K. Wang, Z. Wang, X. Wang, T. Zhang, Y. Liu, H. Wang, Y. Zheng et al., \"Prompt injection attack against llm-integrated applications,\" arXiv preprint arXiv:2306.05499, 2023.", + "[545] S. Toyer, O. Watkins, E. A. Mendes, J. Svegliato, L. Bailey, T. Wang, I. Ong, K. Elmaaroufi, P. Abbeel, T. Darrell et al., \"Tensor trust: Interpretable prompt injection attacks from an online game,\" arXiv preprint arXiv:2311.01011, 2023.", + "[546] J. Shi, Z. Yuan, Y. Liu, Y. Huang, P. Zhou, L. Sun, and N. Z. Gong, \"Optimization-based prompt injection attack to lmm-as-a-judge,\" in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 660-674.", + "[547] X. Liu, Z. Yu, Y. Zhang, N. Zhang, and C. Xiao, \"Automatic and universal prompt injection attacks against large language models,\" arXiv preprint arXiv:2403.04957, 2024.", + "[548] X. Liu, S. Jha, P. McDaniel, B. Li, and C. Xiao, \"Autohijacker: Automatic indirect prompt injection against black-box llm agents.\"", + "[549] A. Al-Kaswan, M. Izadi, and A. Van Deursen, \"Targeted attack on gpt-neo for the satml language model data extraction challenge,\" arXiv preprint arXiv:2302.07735, 2023.", + "[550] E. Su, A. Vellore, A. Chang, R. Mura, B. Nelson, P. Kassianik, and A. Karbasi, \"Extracting memorized training data via decomposition,\" arXiv preprint arXiv:2409.12367, 2024.", + "[551] J. Huang, H. Shao, and K. C.-C. 
Chang, \"Are large pre-trained language models leaking your personal information?\" arXiv preprint arXiv:2205.12628, 2022.", + "[552] Z. Zhang, J. Wen, and M. Huang, \"Ethicist: Targeted training data extraction through loss smoothed soft prompting and calibrated confidence estimation,\"" + ], + "bbox": [ + 76, + 53, + 491, + 943 + ], + "page_idx": 54 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "arXiv preprint arXiv:2307.04401, 2023.", + "[553] K. K. Nakka, A. Frikha, R. Mendes, X. Jiang, and X. Zhou, \"Pii-compass: Guiding llm training data extraction prompts towards the target pii via grounding,\" arXiv preprint arXiv:2407.02943, 2024.", + "[554] Z. Wang, R. Bao, Y. Wu, J. Taylor, C. Xiao, F. Zheng, W. Jiang, S. Gao, and Y. Zhang, \"Unlocking memorization in large language models with dynamic soft prompting,\" arXiv preprint arXiv:2409.13853, 2024.", + "[555] J. G. Wang, J. Wang, M. Li, and S. Neel, \"Pandora's white-box: Precise training data detection and extraction in large language models,\" arXiv preprint arXiv:2402.17012, 2024.", + "[556] Z. Sha and Y. Zhang, \"Prompt stealing attacks against large language models,\" arXiv preprint arXiv:2402.12959, 2024.", + "[557] C. Zhang, J. X. Morris, and V. Shmatikov, \"Extracting prompts by inverting llm outputs,\" arXiv preprint arXiv:2405.15012, 2024.", + "[558] Y. Yang, C. Li, Y. Jiang, X. Chen, H. Wang, X. Zhang, Z. Wang, and S. Ji, \"Prsa: Prompt stealing attacks against large language models,\" arXiv preprint arXiv:2402.19200, 2024.", + "[559] Y. Zeng, H. Lin, J. Zhang, D. Yang, R. Jia, and W. Shi, \"How johnny can persuade llms to jailbreak them: Rethinking persuasion to challenge ai safety by humanizing llms,\" in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2024, pp. 14322-14350.", + "[560] X. Shen, Z. Chen, M. Backes, Y. Shen, and Y. 
Zhang, \"do anything now\": Characterizing and evaluating in-the-wild jailbreak prompts on large language models,\" in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 1671-1685.", + "[561] Z. Wang, W. Xie, B. Wang, E. Wang, Z. Gui, S. Ma, and K. Chen, \"Foot in the door: Understanding large language model jailbreaking via cognitive psychology,\" arXiv preprint arXiv:2402.15690, 2024.", + "[562] M. Samvelyan, S. C. Raparthy, A. Lupu, E. Hambro, A. Markosyan, M. Bhatt, Y. Mao, M. Jiang, J. Parker-Holder, J. Foerster et al., \"Rainbow teaming: Open-ended generation of diverse adversarial prompts,\" Advances in Neural Information Processing Systems, vol. 37, pp. 69747-69786, 2024.", + "[563] H. Jin, R. Chen, A. Zhou, Y. Zhang, and H. Wang, \"Guard: Role-playing to generate natural-language jailbreakings to test guideline adherence of large language models,\" arXiv preprint arXiv:2402.03299, 2024.", + "[564] Y. Yuan, W. Jiao, W. Wang, J.-t. Huang, P. He, S. Shi, and Z. Tu, \"Gpt-4 is too smart to be safe: Stealthy chat with llms via cipher,\" arXiv preprint arXiv:2308.06463, 2023.", + "[565] H. Lv, X. Wang, Y. Zhang, C. Huang, S. Dou, J. Ye, T. Gui, Q. Zhang, and X. Huang, \"Codechameleon: Personalized encryption framework for jailbreaking large language models,\" arXiv preprint arXiv:2402.16717, 2024.", + "[566] F. Jiang, Z. Xu, L. Niu, Z. Xiang, B. Ramasubramanian, B. Li, and R. Poovendran, \"Artprompt: Ascii art-based jailbreak attacks against aligned llms,\" in Proceedings" + ], + "bbox": [ + 506, + 53, + 921, + 943 + ], + "page_idx": 54 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 54 + }, + { + "type": "page_number", + "text": "55", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 54 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2024, pp. 15 157-15 173.", + "[567] C. Anil, E. Durmus, N. Panickssery, M. Sharma, J. Benton, S. Kundu, J. Batson, M. Tong, J. Mu, D. Ford et al., \"Many-shot jailbreaking,\" Advances in Neural Information Processing Systems, vol. 37, pp. 129-696-129742, 2024.", + "[568] Z.-X. Yong, C. Menghini, and S. H. Bach, \"Low-resource languages jailbreak gpt-4,\" arXiv preprint arXiv:2310.02446, 2023.", + "[569] W. Wang, Z. Tu, C. Chen, Y. Yuan, J.-T. Huang, W. Jiao, and M. R. Lyu, \"All languages matter: On the multilingual safety of llms,\" in Annual Meeting of the Association for Computational Linguistics, 2024. [Online]. Available: https://api-semanticscholar.org/ CorpusID:271931322", + "[570] Z. Wei, Y. Wang, A. Li, Y. Mo, and Y. Wang, \"Jailbreak and guard aligned language models with only few in-context demonstrations,\" arXiv preprint arXiv:2310.06387, 2023.", + "[571] N. Xu, F. Wang, B. Zhou, B. Z. Li, C. Xiao, and M. Chen, \"Cognitive overload: Jailbreaking large language models with overloaded logical thinking,\" arXiv preprint arXiv:2311.09827, 2023.", + "[572] P. Ding, J. Kuang, D. Ma, X. Cao, Y. Xian, J. Chen, and S. Huang, \"A wolf in sheep's clothing: Generalized nested jailbreak prompts can fool large language models easily,\" arXiv preprint arXiv:2311.08268, 2023.", + "[573] B. Upadhayay and V. Behzadan, \"Sandwich attack: Multi-language mixture adaptive attack on llms,\" arXiv preprint arXiv:2404.07242, 2024.", + "[574] D. Yao, J. Zhang, I. G. Harris, and M. 
Carlsson, \"Fuzzllm: A novel and universal fuzzing framework for proactively discovering jailbreak vulnerabilities in large language models,\" in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 4485-4489.", + "[575] B. Li, H. Xing, C. Huang, J. Qian, H. Xiao, L. Feng, and C. Tian, \"Structuralsleight: Automated jailbreak attacks on large language models utilizing uncommon text-encoded structure,\" arXiv e-prints, pp. arXiv-2406, 2024.", + "[576] A. Paulus, A. Zharmagambetov, C. Guo, B. Amos, and Y. Tian, \"Advprompter: Fast adaptive adversarial prompting for llms,\" arXiv preprint arXiv:2404.16873, 2024.", + "[577] A. Wei, N. Haghtalab, and J. Steinhardt, \"Jailbroken: How does llm safety training fail?\" Advances in Neural Information Processing Systems, vol. 36, pp. 80079-80110, 2023.", + "[578] Z. Chen, Z. Zhao, W. Qu, Z. Wen, Z. Han, Z. Zhu, J. Zhang, and H. Yao, \"Pandora: Detailed llm jailbreaking via collaborated phishing agents with decomposed reasoning,\" in ICLR 2024 Workshop on Secure and Trustworthy Large Language Models, 2024.", + "[579] E. Perez, S. Huang, F. Song, T. Cai, R. Ring, J. Aslanides, A. Glaese, N. McAleese, and G. Irving, \"Red teaming language models with language models,\" arXiv preprint arXiv:2202.03286, 2022.", + "[580] R. Shah, S. Pour, A. Tagade, S. Casper, J. Rando et al.," + ], + "bbox": [ + 76, + 53, + 491, + 941 + ], + "page_idx": 55 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "\"Scalable and transferable black-box jailbreaks for language models via persona modulation,\" arXiv preprint arXiv:2311.03348, 2023.", + "[581] X. Guo, F. Yu, H. Zhang, L. Qin, and B. Hu, \"Coldattack: Jailbreaking lms with stealthiness and controllability,\" arXiv preprint arXiv:2402.08679, 2024.", + "[582] J. Yu, H. Luo, J. Y.-C. Hu, W. Guo, H. Liu, and X. 
Xing, \"Enhancing jailbreak attack against large language models through silent tokens,\" arXiv preprint arXiv:2405.20653, 2024.", + "[583] Z.-W. Hong, I. Shenfeld, T.-H. Wang, Y.-S. Chuang, A. Pareja, J. Glass, A. Srivastava, and P. Agrawal, \"Curiosity-driven red-teaming for large language models,\" arXiv preprint arXiv:2402.19464, 2024.", + "[584] X. Zheng, T. Pang, C. Du, Q. Liu, J. Jiang, and M. Lin, \"Improved few-shot jailbreaking can circumvent aligned language models and their defenses,\" Advances in Neural Information Processing Systems, vol. 37, pp. 32-856-32-887, 2024.", + "[585] Z. Xiao, Y. Yang, G. Chen, and Y. Chen, \"Distract large language models for automatic jailbreak attack,\" arXiv preprint arXiv:2403.08424, 2024.", + "[586] Z. Chang, M. Li, Y. Liu, J. Wang, Q. Wang, and Y. Liu, \"Play guessing game with llm: Indirect jailbreak attack with implicit clues,\" arXiv preprint arXiv:2402.09091, 2024.", + "[587] J. Yu, X. Lin, Z. Yu, and X. Xing, \"Gptfuzzer: Red teaming large language models with auto-generated jailbreak prompts,\" arXiv preprint arXiv:2309.10253, 2023.", + "[588] W. Jiang, Z. Wang, J. Zhai, S. Ma, Z. Zhao, and C. Shen, \"Unlocking adversarial suffix optimization without affirmative phrases: Efficient black-box jailbreaking via llm as optimizer,\" arXiv preprint arXiv:2408.11313, 2024.", + "[589] J. Zhang, Z. Wang, R. Wang, X. Ma, and Y.-G. Jiang, \"Enja: Ensemble jailbreak on large language models,\" arXiv preprint arXiv:2408.03603, 2024.", + "[590] X. Zhao, X. Yang, T. Pang, C. Du, L. Li, Y.-X. Wang, and W. Y. Wang, \"Weak-to-strong jailbreaking on large language models,\" arXiv preprint arXiv:2401.17256, 2024.", + "[591] B. Upadhayay, V. Behzadan, and A. Karbasi, \"Cognitive overload attack: Prompt injection for long context,\" arXiv preprint arXiv:2410.11272, 2024.", + "[592] H. Kwon and W. Pak, \"Text-based prompt injection attack using mathematical functions in modern large language models,\" *Electronics*, vol. 13, no. 24, p. 
5008, 2024.", + "[593] E. Bagdasaryan, T.-Y. Hsieh, B. Nassi, and V. Shmatikov, \"Abusing images and sounds for indirect instruction injection in multi-modal llms,\" arXiv preprint arXiv:2307.10490, 2023.", + "[594] D. Pasquini, M. Strohmeier, and C. Troncoso, \"Neural exec: Learning (and learning from) execution triggers for prompt injection attacks,\" in Proceedings of the 2024 Workshop on Artificial Intelligence and Security, 2024, pp. 89-100.", + "[595] Z. Shao, H. Liu, J. Mu, and N. Z. Gong, \"Making llms vulnerable to prompt injection via poisoning alignment,\" arXiv preprint arXiv:2410.14827, 2024.", + "[596] Y. Yang, H. Yao, B. Yang, Y. He, Y. Li, T. Zhang," + ], + "bbox": [ + 506, + 53, + 921, + 941 + ], + "page_idx": 55 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 55 + }, + { + "type": "page_number", + "text": "56", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 55 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Z. Qin, and K. Ren, \"Tapi: Towards target-specific and adversarial prompt injection against code llms,\" arXiv preprint arXiv:2407.09164, 2024.", + "[597] Y. Ren, \"F2a: An innovative approach for prompt injection by utilizing feign security detection agents,\" arXiv preprint arXiv:2410.08776, 2024.", + "[598] R. Pedro, D. Castro, P. Carreira, and N. Santos, \"From prompt injections to sql injection attacks: How protected is your llm-integrated web application?\" arXiv preprint arXiv:2308.01990, 2023.", + "[599] Y. Lee, T. Park, Y. Lee, J. Gong, and J. Kang, \"Exploring potential prompt injection attacks in federated military Ilms and their mitigation,\" arXiv preprint arXiv:2501.18416, 2025.", + "[600] D. Lee and M. Tiwari, \"Prompt infection: Llm-to-llm prompt injection within multi-agent systems,\" arXiv preprint arXiv:2410.07283, 2024.", + "[601] W. Zhang, X. Kong, C. Dewitt, T. Braunl, and J. B. 
Hong, \"A study on prompt injection attack against lvm-integrated mobile robotic systems,\" in 2024 IEEE 35th International Symposium on Software Reliability Engineering Workshops (ISSREW). IEEE, 2024, pp. 361-368.", + "[602] W. Meng, Z. Guo, L. Wu, C. Gong, W. Liu, W. Li, C. Wei, and W. Chen, \"Rr: Unveiling llm training privacy through recollection and ranking,\" arXiv preprint arXiv:2502.12658, 2025.", + "[603] B. Jayaraman, E. Ghosh, H. Inan, M. Chase, S. Roy, and W. Dai, \"Active data pattern extraction attacks on generative language models,\" arXiv preprint arXiv:2207.10802, 2022.", + "[604] Z. Zeng, T. Xiang, S. Guo, J. He, Q. Zhang, G. Xu, and T. Zhang, \"Contrast-then-approximate: Analyzing keyword leakage of generative language models,\" IEEE Transactions on Information Forensics and Security, 2024.", + "[605] C. Jiang, X. Pan, G. Hong, C. Bao, and M. Yang, \"Rag-thief: Scalable extraction of private data from retrieval-augmented generation applications with agent-based attacks,\" arXiv preprint arXiv:2411.14110, 2024.", + "[606] Z. Qi, H. Zhang, E. Xing, S. Kakade, and H. Lakkaraju, \"Follow my instruction and spill the beans: Scalable data extraction from retrieval-augmented generation systems,\" arXiv preprint arXiv:2402.17840, 2024.", + "[607] S. Zeng, J. Zhang, P. He, Y. Xing, Y. Liu, H. Xu, J. Ren, S. Wang, D. Yin, Y. Chang et al., \"The good and the bad: Exploring privacy issues in retrieval-augmented generation (rag),\" arXiv preprint arXiv:2402.16893, 2024.", + "[608] Y. Peng, J. Wang, H. Yu, and A. Houmansadr, \"Data extraction attacks in retrieval-augmented generation via backdoors,\" arXiv preprint arXiv:2411.01705, 2024.", + "[609] A. Panda, C. A. Choquette-Choo, Z. Zhang, Y. Yang, and P. Mittal, \"Teach llms to phish: Stealing private information from language models,\" arXiv preprint arXiv:2403.00871, 2024.", + "[610] L. Lu, Z. Zuo, Z. Sheng, and P. 
Zhou, “Merger-as-a-stealer: Stealing targeted pii from aligned llms with model merging,” arXiv preprint arXiv:2502.16094, 2025.", + "[611] X. Chen, S. Tang, R. Zhu, S. Yan, L. Jin, Z. Wang, L. Su," + ], + "bbox": [ + 75, + 53, + 491, + 943 + ], + "page_idx": 56 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Z. Zhang, X. Wang, and H. Tang, \"The janus interface: How fine-tuning in large language models amplifies the privacy risks,\" in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 1285-1299.", + "[612] R. Panchendrarajan and S. Bhoi, \"Dataset reconstruction attack against language models,\" 2021.", + "[613] M. R. U. Rashid, V. A. Dasu, K. Gu, N. Sultana, and S. Mehnaz, \"Fltrojan: Privacy leakage attacks against federated language models through selective weight tampering,\" arXiv preprint arXiv:2310.16152, 2023.", + "[614] J. Dentan, A. Paran, and A. Shabou, \"Reconstructing training data from document understanding models,\" in 33rd USENIX Security Symposium (USENIX Security 24), 2024, pp. 6813-6830.", + "[615] J. Hósciłowicz, P. Popiołek, J. Rudkowski, J. Bieniasz, and A. Janicki, \"Unconditional token forcing: Extracting text hidden within llm,\" in 2024 19th Conference on Computer Science and Intelligence Systems (FedCSIS). IEEE, 2024, pp. 621-624.", + "[616] A. Al-Kaswan, M. Izadi, and A. Van Deursen, \"Traces of memorisation in large language models for code,\" in Proceedings of the IEEE/ACM 46th International Conference on Software Engineering, 2024, pp. 1-12.", + "[617] Y. Nie, C. Wang, K. Wang, G. Xu, G. Xu, and H. Wang, \"Decoding secret memorization in code llms through token-level characterization,\" arXiv preprint arXiv:2410.08858, 2024.", + "[618] E. Lehman, S. Jain, K. Pichotta, Y. Goldberg, and B. C. Wallace, \"Does bert pretrained on clinical notes reveal sensitive data?\" arXiv preprint arXiv:2104.07762, 2021.", + "[619] A. Diera, N. Lell, A. Garifullina, and A. 
Scherp, \"Memorization of named entities in fine-tuned bert models,\" in International Cross-Domain Conference for Machine Learning and Knowledge Extraction. Springer, 2023, pp. 258-279.", + "[620] R. Zhang, S. Hidano, and F. Koushanfar, \"Text re- vealer: Private text reconstruction via model inversion attacks against transformers,\" arXiv preprint arXiv:2209.10505, 2022.", + "[621] Y. Huang, Y. Li, W. Wu, J. Zhang, and M. R. Lyu, \"Your code secret belongs to me: neural code completion tools can memorize hard-coded credentials,\" Proceedings of the ACM on Software Engineering, vol. 1, no. FSE, pp. 2515-2537, 2024.", + "[622] T. Tiwari and G. E. Suh, \"Sequence-level analysis of leakage risk of training data in large language models,\" arXiv preprint arXiv:2412.11302, 2024.", + "[623] H. Shao, J. Huang, S. Zheng, and K. C.-C. Chang, \"Quantifying association capabilities of large language models and its implications on privacy leakage,\" arXiv preprint arXiv:2305.12707, 2023.", + "[624] Y. More, P. Ganesh, and G. Farnadi, \"Towards more realistic extraction attacks: An adversarial perspective,\" arXiv preprint arXiv:2407.02596, 2024.", + "[625] R. Staab, M. Vero, M. Balunović, and M. Vechev, \"Beyond memorization: Violating privacy via inference with large language models,\" arXiv preprint arXiv:2310.07298, 2023.", + "[626] H. Xu, Z. Zhang, X. Yu, Y. Wu, Z. Zha, B. Xu, W. Xu, M. Hu, and K. Peng, \"Targeted training data extrac" + ], + "bbox": [ + 506, + 53, + 921, + 943 + ], + "page_idx": 56 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 56 + }, + { + "type": "page_number", + "text": "57", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 56 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "tion—neighborhood comparison-based membership inference attacks in large language models,\" Applied Sciences, vol. 14, no. 16, p. 
7118, 2024.", + "[627] A. Karamolegkou, J. Li, L. Zhou, and A. Søgaard, \"Copyright violations and large language models,\" arXiv preprint arXiv:2310.13771, 2023.", + "[628] X. Zheng, H. Han, S. Shi, Q. Fang, Z. Du, X. Hu, and Q. Guo, \"Inputsnatch: Stealing input in llm services via timing side-channel attacks,\" arXiv preprint arXiv:2411.18191, 2024.", + "[629] Y. Dong, R. Mu, G. Jin, Y. Qi, J. Hu, X. Zhao, J. Meng, W. Ruan, and X. Huang, \"Building guardrails for large language models,\" arXiv preprint arXiv:2402.01822, 2024.", + "[630] N. Jain, A. Schwarzschild, Y. Wen, G. Somepalli, J. Kirchenbauer, P. yeh Chiang, M. Goldblum, A. Saha, J. Geiping, and T. Goldstein, \"Baseline defenses for adversarial attacks against aligned language models,\" 2024.", + "[631] H. Lin, Y. Lao, T. Geng, T. Yu, and W. Zhao, \"Uniguardian: A unified defense for detecting prompt injection, backdoor attacks and adversarial attacks in large language models,\" arXiv preprint arXiv:2502.13141, 2025.", + "[632] Z. Hu, G. Wu, S. Mitra, R. Zhang, T. Sun, H. Huang, and V. Swaminathan, \"Token-level adversarial prompt detection based on perplexity measures and contextual information,\" in ICLR 2025 Workshop on Building Trust in Language Models and Applications, 2025.", + "[633] Y. Gou, K. Chen, Z. Liu, L. Hong, H. Xu, Z. Li, D.-Y. Yeung, J. T. Kwok, and Y. Zhang, \"Eyes closed, safety on: Protecting multimodal llms via image-to-text transformation,\" in European Conference on Computer Vision, 2024, pp. 388-404.", + "[634] S. Armstrong, M. Franklin, C. Stevens, and R. Gorman, \"Defense against the dark prompts: Mitigating best-of-n jailbreaking with prompt evaluation,\" arXiv preprint arXiv:2107.03374, 2025.", + "[635] Y. Xie, M. Fang, R. Pi, and N. Gong, \"GradSafe: Detecting jailbreak prompts for LLMs via safety-critical gradient analysis,\" in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), L.-W. Ku, A. Martins, and V. 
Srikumar, Eds., 2024, pp. 507-518.", + "[636] B. Peng, Z. Bi, Q. Niu, M. Liu, P. Feng, T. Wang, L. K. Yan, Y. Wen, Y. Zhang, and C. H. Yin, \"Jailbreaking and mitigation of vulnerabilities in large language models,\" arXiv preprint arXiv:2410.15236, 2024.", + "[637] A. Kumar, C. Agarwal, S. Srinivas, A. J. Li, S. Feizi, and H. Lakkaraju, \"Certifying LLM safety against adversarial prompting,\" in First Conference on Language Modeling, 2024.", + "[638] X. Zhang, C. Zhang, T. Li, Y. Huang, X. Jia, M. Hu, J. Zhang, Y. Liu, S. Ma, and C. Shen, \"Jailguard: A universal detection framework for llm prompt-based attacks,\" arXiv preprint arXiv:2312.10766, 2023.", + "[639] Y. Liu, Y. Jia, R. Geng, J. Jia, and N. Z. Gong, \"Formalizing and benchmarking prompt injection attacks and defenses,\" in Proceedings of the 33rd USENIX Conference on Security Symposium, 2024.", + "[640] X. Suo, \"Signed-prompt: A new approach to prevent" + ], + "bbox": [ + 76, + 53, + 491, + 943 + ], + "page_idx": 57 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "prompt injection attacks against llm-integrated applications,\" in AIP Conference Proceedings, vol. 3194, no. 1. AIP Publishing, 2024.", + "[641] L. Yan, Z. Zhang, G. Tao, K. Zhang, X. Chen, G. Shen, and X. Zhang, \"Parafuzz: An interpretability-driven technique for detecting poisoned samples in nlp,\" Advances in Neural Information Processing Systems, vol. 36, pp. 66755-66767, 2023.", + "[642] X. Hu, P.-Y. Chen, and T.-Y. Ho, \"Gradient cuff: Detecting jailbreak attacks on large language models by exploring refusal loss landscapes,\" in Advances in Neural Information Processing Systems, vol. 37, 2024, pp. 126-265-126-296.", + "[643] G. Alon and M. J. Kamfonas, \"Detecting language model attacks with perplexity,\" 2024.", + "[644] J. Ji, B. Hou, A. Robey, G. J. Pappas, H. Hassani, Y. Zhang, E. Wong, and S. 
Chang, \"Defending large language models against jailbreak attacks via semantic smoothing,\" CoRR, 2024.", + "[645] M. Phute, A. Helbling, M. Hull, S. Peng, S. Szyller, C. Cornelius, and D. H. Chau, \"Llm self defense: By self examination, llms know they are being tricked,\" arXiv preprint arXiv:2308.07308, 2024.", + "[646] L. N. Candogan, Y. Wu, E. A. Rocamora, G. G. Chrysos, and V. Cevher, \"Single-pass detection of jailbreaking input in large language models,\" arXiv preprint arXiv:2502.15435, 2025.", + "[647] B. Cao, Y. Cao, L. Lin, and J. Chen, “Defending against alignment-breaking attacks via robustly aligned LLM,” in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), L.-W. Ku, A. Martins, and V. Srikumar, Eds., 2024, pp. 10542-10560.", + "[648] Y. Zhang, L. Ding, L. Zhang, and D. Tao, \"Intention analysis makes LLMs a good jailbreak defender,\" in Proceedings of the 31st International Conference on Computational Linguistics, 2025, pp. 2947-2968.", + "[649] S. Han, K. Rao, A. Ettinger, L. Jiang, B. Y. Lin, N. Lambert, Y. Choi, and N. Dziri, \"Wildguard: Open one-stop moderation tools for safety risks, jailbreaks, and refusals of llms,\" in The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track.", + "[650] M. Pisano, P. Ly, A. Sanders, B. Yao, D. Wang, T. Strzalkowski, and M. Si, \"Bergeron: Combating adversarial attacks through a conscience-based alignment framework,\" arXiv preprint arXiv:2312.00029, 2024.", + "[651] A. Robey, E. Wong, H. Hassani, and G. J. Pappas, \"Smoothllm: Defending large language models against jailbreaking attacks,\" arXiv preprint arXiv:2310.03684, 2023.", + "[652] J. Ji, B. Hou, Z. Zhang, G. Zhang, W. Fan, Q. Li, Y. Zhang, G. Liu, S. Liu, and S. 
Chang, \"Advancing the robustness of large language models through self-denoised smoothing,\" in Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 2: Short Papers), 2024, pp. 246-257.", + "[653] J. Yi, Y. Xie, B. Zhu, K. Hines, E. Kiciman, G. Sun, X. Xie, and F. Wu, \"Benchmarking and defending against indirect prompt injection attacks on large lan" + ], + "bbox": [ + 506, + 53, + 921, + 943 + ], + "page_idx": 57 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 57 + }, + { + "type": "page_number", + "text": "58", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 57 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "guage models,\" CoRR, 2023.", + "[654] X. Song, S. Duan, and G. Liu, \"Alis: Aligned llm instruction security strategy for unsafe input prompt,\" in Proceedings of the 31st International Conference on Computational Linguistics, 2025, pp. 9124-9146.", + "[655] Y. Wang, Z. Shi, A. Bai, and C.-J. Hsieh, \"Defending Ilms against jailbreaking attacks via backtranslation,\" in Findings of the Association for Computational Linguistics: ACL 2024, L.-W. Ku, A. Martins, and V. Srikumar, Eds., 2024, pp. 16031-16046.", + "[656] E. Zverev, S. Abdelnabi, M. Fritz, and C. H. Lampert, \"Can LLMs separate instructions from data? and what do we even mean by that?\" CoRR, 2024.", + "[657] Y. Dong, R. Mu, G. Jin, Y. Qi, J. Hu, X. Zhao, J. Meng, W. Ruan, and X. Huang, \"Building guardrails for large language models,\" arXiv preprint arXiv:2402.01822, 2024.", + "[658] D. Kumar, Y. A. AbuHashem, and Z. Durmeric, \"Watch your language: Investigating content moderation with large language models,\" in Proceedings of the International AAAI Conference on Web and Social Media, vol. 18, 2024, pp. 865-878.", + "[659] T. Rebedea, R. Dinu, M. N. 
Sreedhar, C. Parisien, and J. Cohen, \"Nemo guardrails: A toolkit for controllable and safe llm applications with programmable rails,\" in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, 2023, pp. 431-445.", + "[660] OpenAI, \"Improving model safety behavior with rule-based rewards,\" https://openai.com/index/improving-model-safety-behavior-with-rule-based-re 2025, accessed: 2025-03-24.", + "[661] H. Ma, C. Zhang, H. Fu, P. Zhao, and B. Wu, \"Adapting large language models for content moderation: Pitfalls in data engineering and supervised fine-tuning,\" arXiv preprint arXiv:2310.03400, 2023.", + "[662] M. Phute, A. Helbling, M. Hull, S. Peng, S. Szyller, C. Cornelius, and D. H. Chau, \"Llm self defense: By self examination, llms know they are being tricked,\" arXiv preprint arXiv:2308.07308, 2023.", + "[663] Z. Gou, Z. Shao, Y. Gong, Y. Shen, Y. Yang, N. Duan, and W. Chen, \"Critic: Large language models can self-correct with tool-interactive critiquing,\" arXiv preprint arXiv:2305.11738, 2023.", + "[664] C. Lu, S. Holt, C. Fanconi, A. J. Chan, J. Foerster, M. van der Schaar, and R. T. Lange, \"Discovering preference optimization algorithms with and for large language models,\" in Advances in Neural Information Processing Systems, vol. 37, 2024, pp. 86528-86573.", + "[665] A. Madaan, N. Tandon, P. Gupta, S. Hallinan, L. Gao, S. Wiegreffe, U. Alon, N. Dziri, S. Prabhumoye, Y. Yang et al., \"Self-refine: Iterative refinement with self-feedback,\" Advances in Neural Information Processing Systems, vol. 36, pp. 46534-46594, 2023.", + "[666] D. Jiang, X. Ren, and B. Y. Lin, \"Llm-blender: Ensemble large language models with pairwise ranking and generative fusion,\" in Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2023, pp. 14165-14178.", + "[667] Z. Lai, X. Zhang, and S. 
Chen, \"Adaptive ensembles" + ], + "bbox": [ + 76, + 53, + 491, + 941 + ], + "page_idx": 58 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "of fine-tuned transformers for llm-generated text detection,\" in 2024 International Joint Conference on Neural Networks. IEEE, 2024, pp. 1-7.", + "[668] C. Xiong, X. Qi, P.-Y. Chen, and T.-Y. Ho, \"Defensive prompt patch: A robust and interpretable defense of llms against jailbreak attacks,\" arXiv preprint arXiv:2405.20099, 2024.", + "[669] Z. Zhang, Q. Zhang, and J. Foerster, “Parden, can you repeat that? defending against jailbreaks via repetition,” in Proceedings of the 41st International Conference on Machine Learning, 2024, pp. 60271-60287.", + "[670] Z. Yuan, Z. Xiong, Y. Zeng, N. Yu, R. Jia, D. Song, and B. Li, \"Rigorllm: resilient guardrails for large language models against undesired content,\" in Proceedings of the 41st International Conference on Machine Learning, 2024, pp. 57-953-57-965.", + "[671] M. Cao, M. Fatemi, J. C. Cheung, and S. Shabanian, \"Systematic rectification of language models via dead-end analysis,\" in The Eleventh International Conference on Learning Representations, 2023.", + "[672] F. Faal, K. Schmitt, and J. Y. Yu, \"Reward modeling for mitigating toxicity in transformer-based language models,\" Applied Intelligence, vol. 53, no. 7, p. 8421-8435, 2022.", + "[673] W. Zeng, Y. Liu, R. Mullins, L. Peran, J. Fernandez, H. Harkous, K. Narasimhan, D. Proud, P. Kumar, B. Radharapu et al., \"Shieldgemma: Generative ai content moderation based on gemma,\" arXiv preprint arXiv:2407.21772, 2024.", + "[674] Z. Wang, F. Yang, L. Wang, P. Zhao, H. Wang, L. Chen, *ards/, Q. Lin, and K.-F. Wong, \"SELF-GUARD: Empower the LLM to safeguard itself,\" in *Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics*, 2024, pp. 1648-1668.", + "[675] S. Ghosh, P. Varshney, E. Galinkin, and C. 
Parisien, \"Aegis: Online adaptive ai content safety moderation with ensemble of llm experts,\" arXiv preprint arXiv:2404.05993, 2024.", + "[676] W. Wang, J.-T. Huang, W. Wu, J. Zhang, Y. Huang, S. Li, P. He, and M. R. Lyu, \"Mttm: Metamorphic testing for textual content moderation software,\" 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE), pp. 2387-2399, 2023. [Online]. Available: https://api-semanticscholar.org/ CorpusID:256826966", + "[677] K.-L. Chiu, A. Collins, and R. Alexander, \"Detecting hate speech with gpt-3,\" arXiv preprint arXiv:2103.12407, 2021.", + "[678] J. Kim, A. Derakhshan, and I. G. Harris, \"Robust safety classifier for large language models: Adversarial prompt shield,\" arXiv preprint arXiv:2311.00172, 2023.", + "[679] B. Krause, A. D. Gotmare, B. McCann, N. S. Keskar, S. Joty, R. Socher, and N. F. Rajani, \"Gedi: Generative discriminator guided sequence generation,\" in Findings of the Association for Computational Linguistics: EMNLP 2021, 2021, pp. 4929-4952.", + "[680] Q. Liu, Z. Zhou, L. He, Y. Liu, W. Zhang, and S. Su, \"Alignment-enhanced decoding: Defending jailbreaks via token-level adaptive refining of probability distributions,\" in Proceedings of the 2024 Conference on" + ], + "bbox": [ + 506, + 53, + 921, + 941 + ], + "page_idx": 58 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 58 + }, + { + "type": "page_number", + "text": "59", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 58 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Empirical Methods in Natural Language Processing, 2024, pp. 2802-2816.", + "[681] A. Liu, M. Sap, X. Lu, S. Swayamdipta, C. Bhagavatula, N. A. Smith, and Y. 
Choi, \"Dexperts: Decoding-time controlled text generation with experts and anti-experts,\" in Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics, 2021, pp. 6691-6706.", + "[682] T. Radcliffe, E. Lockhart, and J. Wetherington, \"Automated prompt engineering for semantic vulnerabilities in large language models,\" Authorea Preprints, 2024.", + "[683] F. Trad and A. Chehab, \"Prompt engineering or finetuning? a case study on phishing detection with large language models,\" Machine Learning and Knowledge Extraction, vol. 6, no. 1, pp. 367-384, 2024.", + "[684] A. Zhou, B. Li, and H. Wang, \"Robust prompt optimization for defending language models against jailbreaking attacks,\" in Advances in Neural Information Processing Systems, vol. 37. Curran Associates, Inc., 2024, pp. 40184-40211.", + "[685] Y. Mo, Y. Wang, Z. Wei, and Y. Wang, \"Fight back against jailbreaking via prompt adversarial tuning,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024.", + "[686] Y. Zhang, L. Ding, L. Zhang, and D. Tao, \"Intention analysis makes lms a good jailbreak defender,\" in Proceedings of the 31st International Conference on Computational Linguistics, 2025, pp. 2947-2968.", + "[687] Y. Chen, H. Li, Z. Zheng, Y. Song, D. Wu, and B. Hooi, \"Defense against prompt injection attack by leveraging attack techniques,\" arXiv preprint arXiv:2411.00459, 2024.", + "[688] Z. Zhang, J. Yang, P. Ke, F. Mi, H. Wang, and M. Huang, \"Defending large language models against jailbreaking attacks through goal prioritization,\" in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics, 2023, pp. 8865-8887.", + "[689] Y. Xie, J. Yi, J. Shao, J. Curl, L. Lyu, Q. Chen, X. Xie, and F. Wu, \"Defending chatgpt against jailbreak attack via self-reminders,\" Nature Machine Intelligence, vol. 5, no. 12, pp. 1486–1496, 2023.", + "[690] S. Chen, J. Piet, C. Sitawarin, and D. 
Wagner, \"Struq: Defending against prompt injection with structured queries,\" arXiv preprint arXiv:2402.06363, 2024.", + "[691] K. Hines, G. Lopez, M. Hall, F. Zarfati, Y. Zunger, and E. Kiciman, \"Defending against indirect prompt injection attacks with spotlighting,\" arXiv preprint arXiv:2403.14720, 2024.", + "[692] S. Slocum and D. Hadfield-Menell, \"Inverse prompt engineering for task-specific LLM safety,\" 2025. [Online]. Available: https://openreview.net/forum? id=3MDmM0rMPQ", + "[693] K. Edemacu and X. Wu, \"Privacy preserving prompt engineering: A survey,\" arXiv preprint arXiv:2404.06001, 2024.", + "[694] S. Utpala, S. Hooker, and P.-Y. Chen, \"Locally differentially private document generation using zero shot prompting,\" in Findings of the Association for Computational Linguistics: EMNLP 2023, 2023, pp. 8442-8457.", + "[695] H. Duan, A. Dziedzic, N. Papernot, and F. Boenisch," + ], + "bbox": [ + 76, + 53, + 491, + 941 + ], + "page_idx": 59 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "\"Flocks of stochastic parrots: Differentially private prompt learning for large language models,\" Advances in Neural Information Processing Systems, vol. 36, pp. 76852-76871, 2023.", + "[696] W. Wang, W. Jiao, J. Huang, R. Dai, J.-T. Huang, Z. Tu, and M. R. Lyu, \"Not all countries celebrate thanksgiving: On the cultural dominance in large language models,\" ArXiv, vol. abs/2310.12481, 2023. [Online]. Available: https://api_semanticscholar.org/ CorpusID:264305810", + "[697] M. Kaneko, D. Bollegala, N. Okazaki, and T. Baldwin, \"Evaluating gender bias in large language models via chain-of-thought prompting,\" arXiv preprint arXiv:2401.15585, 2024.", + "[698] X. He, S. Zannettou, Y. Shen, and Y. Zhang, \"You only prompt once: On the capabilities of prompt learning on large language models to tackle toxic content,\" in 2024 IEEE Symposium on Security and Privacy (SP). IEEE, 2024, pp. 770-787.", + "[699] X. Zou, Y. Chen, and K. 
Li, \"Is the system message really important to jailbreaks in large language models?\" arXiv preprint arXiv:2402.14857, 2024.", + "[700] R. Xu, Z. Qi, and W. Xu, \"Preemptive answer \"attacks\" on chain-of-thought reasoning,\" in Findings of the Association for Computational Linguistics ACL 2024, 2024, pp. 14708-14726.", + "[701] C. Zheng, F. Yin, H. Zhou, F. Meng, J. Zhou, K.-W. Chang, M. Huang, and N. Peng, \"On prompt-driven safeguarding for large language models,\" in Proceedings of the 41st International Conference on Machine Learning, ser. Proceedings of Machine Learning Research, vol. 235, 21-27 Jul 2024, pp. 61-613.", + "[702] Y. Wang, X. Liu, Y. Li, M. Chen, and C. Xiao, \"Adashield: Safeguarding multimodal large language models from structure-based attack via adaptive shield prompting,\" in European Conference on Computer Vision. Springer, 2024, pp. 77-94.", + "[703] Z. Shi, Z. Wang, Y. Su, W. Luo, H. Gao, F. Yang, R. Tang, and Y. Zhang, \"Robustness-aware automatic prompt optimization,\" arXiv preprint arXiv:2412.18196, 2024.", + "[704] Y. Wu, Y. Gao, B. Zhu, Z. Zhou, X. Sun, S. Yang, J.-G. Lou, Z. Ding, and L. Yang, \"Strago: Harnessing strategic guidance for prompt optimization,\" in Findings of the Association for Computational Linguistics: EMNLP 2024, 2024, pp. 10043-10061.", + "[705] F. Wu, N. Zhang, S. Jha, P. McDaniel, and C. Xiao, \"A new era in llm security: Exploring security concerns in real-world llm-based systems,\" arXiv preprint arXiv:2402.18649, 2024.", + "[706] A. Borzunov, M. Ryabinin, A. Chumachenko, D. Baranchuk, T. Dettmers, Y. Belkada, P. Samygin, and C. A. Raffel, \"Distributed inference and finetuning of large language models over the internet,\" Advances in neural information processing systems, vol. 36, pp. 12312-12331, 2023.", + "[707] A. Agrawal, N. Kedia, A. Panwar, J. Mohan, N. Kwa-tra, B. Gulavani, A. Tumanov, and R. 
Ramjee, \"Taming {Throughput-Latency} tradeoff in {LLM} inference with {Sarathi-Serve}\", in 18th USENIX Symposium on Operating Systems Design and Implementation (OSDI" + ], + "bbox": [ + 508, + 53, + 921, + 941 + ], + "page_idx": 59 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 59 + }, + { + "type": "page_number", + "text": "60", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 59 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "24), 2024, pp. 117-134.", + "[708] Y. Zhong, S. Liu, J. Chen, J. Hu, Y. Zhu, X. Liu, X. Jin, and H. Zhang, \" $\\{\\mathrm{DistServe}\\}$ : Disaggregating prefill and decoding for goodput-optimized large language model serving,\" in 18th USENIX Symposium on Operating Systems Design and Implementation (OSDI 24), 2024, pp. 193-210.", + "[709] H. Sun, Z. Chen, X. Yang, Y. Tian, and B. Chen, \"Tri force: Lossless acceleration of long sequence generation with hierarchical speculative decoding,\" in First Conference on Language Modeling, 2024.", + "[710] T. Cai, Y. Li, Z. Geng, H. Peng, J. D. Lee, D. Chen, and T. Dao, \"Medusa: Simple LLM inference acceleration framework with multiple decoding heads,\" in Proceedings of the 41st International Conference on Machine Learning, vol. 235. PMLR, 2024, pp. 5209-5235.", + "[711] J. Chen, V. Tiwari, R. Sadhukhan, Z. Chen, J. Shi, I. E.-H. Yen, and B. Chen, \"Magicdec: Breaking the latency-throughput tradeoff for long context generation with speculative decoding,\" arXiv preprint arXiv:2408.11049, 2024.", + "[712] C. Holmes, M. Tanaka, M. Wyatt, A. A. Awan, J. Rasley, S. Rajbhandari, R. Y. Aminabadi, H. Qin, A. Bakhtiari, L. Kurilenko et al., \"Deepspeed-fastgen: High-throughput text generation for llms via mii and deepspeed-inference,\" arXiv preprint arXiv:2401.08671, 2024.", + "[713] R. Svirschevski, A. May, Z. Chen, B. Chen, Z. Jia, and M. 
Ryabinin, \"Specexec: Massively parallel speculative decoding for interactive lmm inference on consumer devices,\" Advances in Neural Information Processing Systems, vol. 37, pp. 16342-16368, 2024.", + "[714] P. Wang, D. Zhang, L. Li, C. Tan, X. Wang, M. Zhang, K. Ren, B. Jiang, and X. Qiu, \"Inferaligner: Inference-time alignment for harmlessness through cross-model guidance,\" in Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, 2024, pp. 10460-10479.", + "[715] X. Wang, D. Wu, Z. Ji, Z. Li, P. Ma, S. Wang, Y. Li, Y. Liu, N. Liu, and J. Rahmel, \"Selfdefend: Llms can defend themselves against jailbreaking in a practical manner,\" CoRR, 2024.", + "[716] X. Hu, P.-Y. Chen, and T.-Y. Ho, \"Gradient cuff: Detecting jailbreak attacks on large language models by exploring refusal loss landscapes,\" arXiv preprint arXiv:2403.00867, 2024.", + "[717] R. K. Sharma, V. Gupta, and D. Grossman, \"Spml: A dsl for defending language models against prompt attacks,\" arXiv preprint arXiv:2402.11755, 2024.", + "[718] J. Zhao, S. Wang, Y. Zhao, X. Hou, K. Wang, P. Gao, Y. Zhang, C. Wei, and H. Wang, \"Models are codes: Towards measuring malicious code poisoning attacks on pre-trained model hubs,\" in Proceedings of the 39th IEEE/ACM International Conference on Automated Software Engineering, 2024, pp. 2087-2098.", + "[719] S. Ghosh, P. Varshney, E. Galinkin, and C. Parisien, \"Aegis: Online adaptive ai content safety moderation with ensemble of llm experts,\" arXiv preprint arXiv:2404.05993, 2024.", + "[720] S. Ghosh, P. Varshney, M. N. Sreedhar, A. Padmakumar, T. Rebedea, J. R. Varghese, and C. Parisien," + ], + "bbox": [ + 76, + 53, + 491, + 941 + ], + "page_idx": 60 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "\"Aegis2.0: A diverse ai safety dataset and risks taxonomy for alignment of llm guardrails,\" in Neurips Safe Generative AI Workshop 2024, 2024.", + "[721] S. Han, K. Rao, A. Ettinger, L. Jiang, B. Y. 
Lin, N. Lambert, Y. Choi, and N. Dziri, \"Wildguard: Open one-stop moderation tools for safety risks, jailbreaks, and refusals of llms,\" arXiv preprint arXiv:2406.18495, 2024.", + "[722] W. Zeng, Y. Liu, R. Mullins, L. Peran, J. Fernandez, H. Harkous, K. Narasimhan, D. Proud, P. Kumar, B. Radharapu et al., \"Shieldgemma: Generative ai content moderation based on gemma,\" arXiv preprint arXiv:2407.21772, 2024.", + "[723] Y. Liu, H. Gao, S. Zhai, J. Xia, T. Wu, Z. Xue, Y. Chen, K. Kawaguchi, J. Zhang, and B. Hooi, \"Guardreasoner: Towards reasoning-based llm safeguards,\" arXiv preprint arXiv:2501.18492, 2025.", + "[724] C. Wang, Y. Liu, B. Li, D. Zhang, Z. Li, and J. Fang, \"Safety in large reasoning models: A survey,\" arXiv preprint arXiv:2504.17704, 2025.", + "[725] H. Jin, A. Zhou, J. Menke, and H. Wang, \"Jailbreaking large language models against moderation guardrails via cipher characters,\" Advances in Neural Information Processing Systems, vol. 37, pp. 59408-59435, 2024.", + "[726] D. Ran, J. Liu, Y. Gong, J. Zheng, X. He, T. Cong, and A. Wang, \"Jailbreak: An integrated toolkit for evaluating jailbreak attempts against large language models,\" arXiv preprint arXiv:2406.09321, 2024.", + "[727] H. Qiu, S. Zhang, A. Li, H. He, and Z. Lan, \"Latent jailbreak: A benchmark for evaluating text safety and output robustness of large language models,\" arXiv preprint arXiv:2307.08487, 2023.", + "[728] K. Zhu, J. Wang, J. Zhou, Z. Wang, H. Chen, Y. Wang, L. Yang, W. Ye, Y. Zhang, N. Gong et al., \"Promptrobust: Towards evaluating the robustness of large language models on adversarial prompts,\" in Proceedings of the 1st ACM Workshop on Large AI Systems and Models with Privacy and Safety Analysis, 2023, pp. 57-68.", + "[729] A. Pei, Z. Yang, S. Zhu, R. Cheng, and J. Jia, \"Selfprompt: Autonomously evaluating llm robustness via domain-constrained knowledge guidelines and refined adversarial prompts,\" arXiv preprint arXiv:2412.00765, 2024.", + "[730] Z. Xu, Y. Liu, G. 
Deng, Y. Li, and S. Picek, \"A comprehensive study of jailbreak attack versus defense for large language models,\" arXiv preprint arXiv:2402.13457, 2024.", + "[731] K. Chen, Y. Liu, D. Wang, J. Chen, and W. Wang, \"Characterizing and evaluating the reliability of llms against jailbreak attacks,\" arXiv preprint arXiv:2408.09326, 2024.", + "[732] B. Wang, C. Xu, S. Wang, Z. Gan, Y. Cheng, J. Gao, A. H. Awadallah, and B. Li, \"Adversarial glue: A multi-task benchmark for robustness evaluation of language models,\" arXiv preprint arXiv:2111.02840, 2021.", + "[733] G. Dong, J. Zhao, T. Hui, D. Guo, W. Wang, B. Feng, Y. Qiu, Z. Gongque, K. He, Z. Wang et al., \"Revisit input perturbation problems for llms: A unified robustness evaluation framework for noisy slot filling task,\" in CCF International Conference on Natural Language Processing and Chinese Computing. Springer," + ], + "bbox": [ + 506, + 53, + 921, + 941 + ], + "page_idx": 60 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 60 + }, + { + "type": "page_number", + "text": "61", + "bbox": [ + 906, + 32, + 919, + 42 + ], + "page_idx": 60 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "2023, pp. 682-694.", + "[734] J. Zheng, A. Ritter, and W. Xu, \"Neo-bench: Evaluating robustness of large language models with neologisms,\" arXiv preprint arXiv:2402.12261, 2024.", + "[735] Y. Li, Y. Guo, F. Guerin, and C. Lin, \"Evaluating large language models for generalization and robustness via data compression,\" arXiv preprint arXiv:2402.00861, 2024.", + "[736] Q. Zhang, H. Qiu, D. Wang, Y. Li, T. Zhang, W. Zhu, H. Weng, L. Yan, and C. Zhang, “A benchmark for semantic sensitive information in llms outputs,” in The Thirteenth International Conference on Learning Representations.", + "[737] A. Wang, A. Singh, J. Michael, F. Hill, O. Levy, and S. R. 
Bowman, \"Glue: A multi-task benchmark and analysis platform for natural language understanding,\" arXiv preprint arXiv:1804.07461, 2018.", + "[738] J. Li, X. Cheng, W. X. Zhao, J.-Y. Nie, and J.-R. Wen, \"Halueval: A large-scale hallucination evaluation benchmark for large language models,\" arXiv preprint arXiv:2305.11747, 2023.", + "[739] A. Pal, L. K. Umapathi, and M. Sankarasubbu, \"Med-halt: Medical domain hallucination test for large language models,\" arXiv preprint arXiv:2307.15343, 2023.", + "[740] Z. Ji, Y. Gu, W. Zhang, C. Lyu, D. Lin, and K. Chen, \"Anah: Analytical annotation of hallucinations in large language models,\" arXiv preprint arXiv:2405.20315, 2024.", + "[741] P. Manakul, A. Liusie, and M. J. Gales, \"Selfcheck-gpt: Zero-resource black-box hallucination detection for generative large language models,\" arXiv preprint arXiv:2303.08896, 2023.", + "[742] Y.-S. Chuang, Y. Xie, H. Luo, Y. Kim, J. Glass, and P. He, \"Dola: Decoding by contrasting layers improves factuality in large language models,\" arXiv preprint arXiv:2309.03883, 2023.", + "[743] N. Mündler, J. He, S. Jenko, and M. Vechev, \"Self-contradictory hallucinations of large language models: Evaluation, detection and mitigation,\" arXiv preprint arXiv:2305.15852, 2023.", + "[744] M. Elaraby, M. Lu, J. Dunn, X. Zhang, Y. Wang, S. Liu, P. Tian, Y. Wang, and Y. Wang, \"Halo: Estimation and reduction of hallucinations in open-source weak large language models,\" arXiv preprint arXiv:2308.11764, 2023.", + "[745] Z. Ji, D. Chen, E. Ishii, S. Cahyawijaya, Y. Bang, B. Wilie, and P. Fung, \"Llm internal states reveal hallucination risk faced with a query,\" arXiv preprint arXiv:2407.03282, 2024.", + "[746] J. Wei, Y. Yao, J.-F. Ton, H. Guo, A. Estornell, and Y. Liu, \"Measuring and reducing llm hallucination without gold-standard answers,\" arXiv preprint arXiv:2402.10412, 2024.", + "[747] A. Deshpande, V. Murahari, T. Rajpurohit, A. Kalyan, and K. 
Narasimhan, \"Toxicity in chatgpt: Analyzing persona-assigned language models,\" arXiv preprint arXiv:2304.05335, 2023.", + "[748] A. de Wynter, I. Watts, T. Wongsangaroonsri, M. Zhang, N. Farra, N. E. Altintoprak, L. Baur, S. Claudet, P. Gajdusek, C. Gören et al., \"Rtp-lx: Can llms evaluate toxicity in multilingual scenarios?\"" + ], + "bbox": [ + 76, + 53, + 491, + 941 + ], + "page_idx": 61 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "arXiv preprint arXiv:2404.14397, 2024.", + "[749] D. Esiobu, X. Tan, S. Hosseini, M. Ung, Y. Zhang, J. Fernandes, J. Dwivedi-Yu, E. Presani, A. Williams, and E. M. Smith, \"Robbie: Robust bias evaluation of large generative language models,\" arXiv preprint arXiv:2311.18140, 2023.", + "[750] S. Wang, P. Wang, T. Zhou, Y. Dong, Z. Tan, and J. Li, \"Ceb: Compositional evaluation benchmark for fairness in large language models,\" arXiv preprint arXiv:2407.02408, 2024.", + "[751] H. Li, D. Guo, D. Li, W. Fan, Q. Hu, X. Liu, C. Chan, D. Yao, Y. Yao, and Y. Song, \"Privlm-bench: A multi-level privacy evaluation benchmark for language models,\" arXiv preprint arXiv:2311.04044, 2023.", + "[752] Q. Li, J. Hong, C. Xie, J. Tan, R. Xin, J. Hou, X. Yin, Z. Wang, D. Hendrycks, Z. Wang et al., \"Llm-pbe: Assessing data privacy in large language models,\" arXiv preprint arXiv:2408.12787, 2024.", + "[753] D. Zhu, D. Chen, X. Wu, J. Geng, Z. Li, J. Grossklags, and L. Ma, \"Privauditor: Benchmarking data protection vulnerabilities in llm adaptation techniques,\" Advances in Neural Information Processing Systems, vol. 37, pp. 9668-9689, 2024.", + "[754] L. Rossi, B. Marek, V. Hanke, X. Wang, M. Backes, A. Dziedzic, and F. Boenisch, \"Auditing empirical privacy protection of private llm adaptations,\" in Neurips Safe Generative AI Workshop 2024.", + "[755] T. Singh, H. Aditya, V. K. Madisetti, and A. 
Bahga, \"Whispered tuning: Data privacy preservation in finetuning llms through differential privacy,\" Journal of Software Engineering and Applications, vol. 17, no. 1, pp. 1-22, 2024.", + "[756] H. Li, W. Hu, H. Jing, Y. Chen, Q. Hu, S. Han, T. Chu, P. Hu, and Y. Song, \"Privaci-bench: Evaluating privacy with contextual integrity and legal compliance,\" arXiv preprint arXiv:2502.17041, 2025.", + "[757] O. Cartwright, H. Dunbar, and T. Radcliffe, “Evaluating privacy compliance in commercial large language models-chatgpt, claude, and gemini,” 2024.", + "[758] X. Zhou, M. Weyssow, R. Widyasari, T. Zhang, J. He, Y. Lyu, J. Chang, B. Zhang, D. Huang, and D. Lo, \"Lessleak-bench: A first investigation of data leakage in llms across 83 software engineering benchmarks,\" arXiv preprint arXiv:2502.06215, 2025.", + "[759] Y. Song, R. Liu, S. Chen, Q. Ren, Y. Zhang, and Y. Yu, \"Securesql: Evaluating data leakage of large language models as natural language interfaces to databases,\" in Findings of the Association for Computational Linguistics: EMNLP 2024, 2024, pp. 5975-5990.", + "[760] X. Liu, Y. Zhu, J. Gu, Y. Lan, C. Yang, and Y. Qiao, \"Mm-safetybench: A benchmark for safety evaluation of multimodal large language models,\" in European Conference on Computer Vision. Springer, 2024, pp. 386-403.", + "[761] W. Luo, S. Ma, X. Liu, X. Guo, and C. Xiao, \"Jailbreakv-28k: A benchmark for assessing the robustness of multimodal large language models against jailbreak attacks,\" arXiv e-prints, pp. arXiv-2404, 2024.", + "[762] F. Weng, Y. Xu, C. Fu, and W. Wang, \"A comprehensive study on jailbreak attacks and defenses for" + ], + "bbox": [ + 506, + 53, + 924, + 941 + ], + "page_idx": 61 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 61 + }, + { + "type": "page_number", + "text": "62", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 61 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "multimodal large language models,\" arXiv preprint arXiv:2408.08464, 2024.", + "[763] Z. Li, P.-Y. Chen, and T.-Y. Ho, \"Retention score: Quantifying jailbreak risks for vision language models,\" arXiv preprint arXiv:2412.17544, 2024.", + "[764] T. Guan, F. Liu, X. Wu, R. Xian, Z. Li, X. Liu, X. Wang, L. Chen, F. Huang, Y. Yacoob et al., \"Hallusionbench: an advanced diagnostic suite for entangled language hallucination and visual illusion in large vision-language models,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 14375-14385.", + "[765] Y. Li, Y. Du, K. Zhou, J. Wang, W. X. Zhao, and J.-R. Wen, \"Evaluating object hallucination in large vision-language models,\" arXiv preprint arXiv:2305.10355, 2023.", + "[766] C. Cui, Y. Zhou, X. Yang, S. Wu, L. Zhang, J. Zou, and H. Yao, “Holistic analysis of hallucination in gpt-4v (ision): Bias and interference challenges,” arXiv preprint arXiv:2311.03287, 2023.", + "[767] S. Wang, X. Ye, Q. Cheng, J. Duan, S. Li, J. Fu, X. Qiu, and X. Huang, \"Cross-modality safety alignment,\" arXiv preprint arXiv:2406.15279, 2024.", + "[768] A. Agarwal, S. Panda, A. Charles, B. Kumar, H. Patel, P. Pattnayak, T. H. Rafi, T. Kumar, and D.-K. Chae, \"Mvtamperbench: Evaluating robustness of vision-language models,\" arXiv preprint arXiv:2412.19794, 2024.", + "[769] H. Zhang, W. Shao, H. Liu, Y. Ma, P. Luo, Y. Qiao, and K. Zhang, \"Avibench: Towards evaluating the robustness of large vision-language model on adversarial visual-instructions,\" arXiv e-prints, pp. arXiv-2403, 2024.", + "[770] Z. Hu, Y. Ren, J. Li, and Y. 
Yin, \"Viva: A benchmark for vision-grounded decision-making with human values,\" arXiv preprint arXiv:2407.03000, 2024.", + "[771] Y. Xiao, A. Liu, Q. Cheng, Z. Yin, S. Liang, J. Li, J. Shao, X. Liu, and D. Tao, \"Genderbias- $\\cdot$ emph {VL}: Benchmarking gender bias in vision language models via counterfactual probing,\" arXiv preprint arXiv:2407.00600, 2024.", + "[772] L. Gustafson, C. Rolland, N. Ravi, Q. Duval, A. Adcock, C.-Y. Fu, M. Hall, and C. Ross, \"Facet: Fairness in computer vision evaluation benchmark,\" in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023, pp. 20370-20382.", + "[773] E. Slyman, S. Lee, S. Cohen, and K. Kafle, \"Fairdedup: Detecting and mitigating vision-language fairness disparities in semantic dataset dedduplication,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 13905-13916.", + "[774] Y. Zhang, J. Wang, and J. Sang, \"Counterfactually measuring and eliminating social bias in vision-language pre-training models,\" in Proceedings of the 30th ACM International Conference on Multimedia, 2022, pp. 4996-5004.", + "[775] K. C. Fraser and S. Kiritchenko, \"Examining gender and racial bias in large vision-language models using a novel dataset of parallel images,\" arXiv preprint arXiv:2402.05779, 2024.", + "[776] A. Seth, M. Hemani, and C. Agarwal, \"Dear: Debias" + ], + "bbox": [ + 76, + 53, + 491, + 943 + ], + "page_idx": 62 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ing vision-language models with additive residuals,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 6820-6829.", + "[777] S. Janghorbani and G. De Melo, \"Multimodal bias: Introducing a framework for stereotypical bias assessment beyond gender and race in vision language models,\" arXiv preprint arXiv:2303.12734, 2023.", + "[778] Y. Zhang, Y. Huang, Y. Sun, C. Liu, Z. Zhao, Z. Fang, Y. Wang, H. Chen, X. Yang, X. 
Wei et al., \"Benchmarking trustworthiness of multimodal large language models: A comprehensive study,\" arXiv preprint arXiv:2406.07057, 2024.", + "[779] Y. Zhang, L. Chen, G. Zheng, Y. Gao, R. Zheng, J. Fu, Z. Yin, S. Jin, Y. Qiao, X. Huang et al., \"Spa-vl: A comprehensive safety preference alignment dataset for vision language model,\" arXiv preprint arXiv:2406.12030, 2024.", + "[780] Z. Zhang, T. Kou, S. Wang, C. Li, W. Sun, W. Wang, X. Li, Z. Wang, X. Cao, X. Min et al., \"Q-eval-100k: Evaluating visual quality and alignment level for text-to-vision content,\" arXiv preprint arXiv:2503.02357, 2025.", + "[781] W. Wang, X. Liu, K. Gao, J.-T. Huang, Y. Yuan, P. He, S. Wang, and Z. Tu, \"Can't see the forest for the trees: Benchmarking multimodal safety awareness for multimodal llms,\" ArXiv, vol. abs/2502.11184, 2025. [Online]. Available: https://api.sementicscholar.org/CorpusID:276409442", + "[782] W. Wang, K. Gao, Z. Jia, Y. Yuan, J.-T. Huang, Q. Liu, S. Wang, W. Jiao, and Z. Tu, \"Chain-of-jailbreak attack for image generation models via editing step by step,\" ArXiv, vol. abs/2410.03869, 2024. [Online]. Available: https://api_semanticscholar.org/ CorpusID:273186566", + "[783] H. Naveed, A. U. Khan, S. Qiu, M. Saqib, S. Anwar, M. Usman, N. Akhtar, N. Barnes, and A. Mian, \"A comprehensive overview of large language models,\" arXiv preprint arXiv:2307.06435, 2023.", + "[784] W. Zhao, Y. Hu, Y. Deng, J. Guo, X. Sui, X. Han, A. Zhang, Y. Zhao, B. Qin, T.-S. Chua et al., \"Beware of your po! measuring and mitigating ai safety risks in role-play fine-tuning of llms,\" arXiv preprint arXiv:2502.20968, 2025.", + "[785] B. Liu, X. Li, J. Zhang, J. Wang, T. He, S. Hong, H. Liu, S. Zhang, K. Song, K. Zhu et al., \"Advances and challenges in foundation agents: From brain-inspired intelligence to evolutionary, collaborative, and safe systems,\" arXiv preprint arXiv:2504.01990, 2025.", + "[786] H. Jin, L. Huang, H. Cai, J. Yan, B. Li, and H. 
Chen, \"From llms to llm-based agents for software engineering: A survey of current, challenges and future,\" arXiv preprint arXiv:2408.02479, 2024.", + "[787] J. Piao, Y. Yan, J. Zhang, N. Li, J. Yan, X. Lan, Z. Lu, Z. Zheng, J. Y. Wang, D. Zhou et al., \"Agentsociety: Large-scale simulation of llm-driven generative agents advances understanding of human behaviors and society,\" arXiv preprint arXiv:2502.08691, 2025.", + "[788] Y. Yan, S. Wang, J. Huo, P. S. Yu, X. Hu, and Q. Wen, \"Mathagent: Leveraging a mixture-of-math-agent framework for real-world multimodal mathematical error detection,\" arXiv preprint arXiv:2503.18132, 2025." + ], + "bbox": [ + 506, + 53, + 921, + 943 + ], + "page_idx": 62 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 62 + }, + { + "type": "page_number", + "text": "63", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 62 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[789] H. Wang, A. Zhang, N. Duy Tai, J. Sun, T.-S. Chua et al., \"Ali-agent: Assessing llms' alignment with human values via agent-based evaluation,\" Advances in Neural Information Processing Systems, vol. 37, pp. 99040-99088, 2024.", + "[790] K. Zhang, J. Li, G. Li, X. Shi, and Z. Jin, \"Codeagent: Enhancing code generation with tool-integrated agent systems for real-world repo-level coding challenges,\" arXiv preprint arXiv:2401.07339, 2024.", + "[791] Y. Shen, K. Song, X. Tan, D. Li, W. Lu, and Y. Zhuang, \"Hugginggpt: Solving ai tasks with chatgpt and its friends in hugging face,\" Advances in Neural Information Processing Systems, vol. 36, pp. 38154-38180, 2023.", + "[792] Z. Chu, S. Wang, J. Xie, T. Zhu, Y. Yan, J. Ye, A. Zhong, X. Hu, J. Liang, P. S. Yu et al., \"Llm agents for education: Advances and applications,\" arXiv preprint arXiv:2503.11733, 2025.", + "[793] W. Zhang, Y. Shen, W. Lu, and Y. 
Zhuang, \"Data-copilot: Bridging billions of data and humans with autonomous workflow,\" arXiv preprint arXiv:2306.07209, 2023.", + "[794] W. Xu, Z. Liang, K. Mei, H. Gao, J. Tan, and Y. Zhang, \"A-mem: Agentic memory for llm agents,\" arXiv preprint arXiv:2502.12110, 2025.", + "[795] Y. Shang, Y. Li, K. Zhao, L. Ma, J. Liu, F. Xu, and Y. Li, \"Agentsquare: Automatic llm agent search in modular design space,\" arXiv preprint arXiv:2410.06153, 2024.", + "[796] J. Yang, C. Jimenez, A. Wettig, K. Lieret, S. Yao, K. Narasimhan, and O. Press, \"Swe-agent: Agent-computer interfaces enable automated software engineering,\" Advances in Neural Information Processing Systems, vol. 37, pp. 50528-50652, 2024.", + "[797] S. Agashe, J. Han, S. Gan, J. Yang, A. Li, and X. E. Wang, \"Agent s: An open agentic framework that uses computers like a human,\" arXiv preprint arXiv:2410.08164, 2024.", + "[798] S. Hao, Y. Gu, H. Ma, J. J. Hong, Z. Wang, D. Z. Wang, and Z. Hu, \"Reasoning with language model is planning with world model,\" arXiv preprint arXiv:2305.14992, 2023.", + "[799] J. Hong, J. Lin, A. Dragan, and S. Levine, \"Interactive dialogue agents via reinforcement learning on hindsight regenerations,\" arXiv preprint arXiv:2411.05194, 2024.", + "[800] J. Tang, T. Fan, and C. Huang, \"Autoagent: A fully-automated and zero-code framework for llm agents,\" arXiv e-prints, pp. arXiv-2502, 2025.", + "[801] G. Li, H. Hammoud, H. Itani, D. Khizbullin, and B. Ghanem, \"Camel: Communicative agents for\" mind\" exploration of large language model society,\" Advances in Neural Information Processing Systems, vol. 36, pp. 51991-52008, 2023.", + "[802] S. Yuan, K. Song, J. Chen, X. Tan, D. Li, and D. Yang, \"Evoagent: Towards automatic multi-agent generation via evolutionary algorithms,\" arXiv preprint arXiv:2406.14228, 2024.", + "[803] M. Zhuge, W. Wang, L. Kirsch, F. Faccio, D. Khizbullin, and J. 
Schmidhuber, \"Language agents as estimizable graphs,\" arXiv preprint arXiv:2402.16823, 2024.", + "[804] Y. Wang, T. Shen, L. Liu, and J. Xie, \"Sibyl: Simple" + ], + "bbox": [ + 76, + 53, + 491, + 941 + ], + "page_idx": 63 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "yet effective agent framework for complex real-world reasoning,\" arXiv preprint arXiv:2407.10718, 2024.", + "[805] Z. Wang, X. Zeng, W. Liu, L. Li, Y. Wang, L. Shang, X. Jiang, Q. Liu, and K.-F. Wong, \"Toolflow: Boosting llm tool-calling through natural and coherent dialogue synthesis,\" arXiv preprint arXiv:2410.18447, 2024.", + "[806] F. Wu, S. Wu, Y. Cao, and C. Xiao, \"Wipi: A new web threat for llm-driven web agents,\" arXiv preprint arXiv:2402.16965, 2024.", + "[807] S. S. Kannan, V. L. Venkatesh, and B.-C. Min, \"Smartllm: Smart multi-agent robot task planning using large language models,\" in 2024 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2024, pp. 12140-12147.", + "[808] R. Fang, R. Bindu, A. Gupta, and D. Kang, \"Llm agents can autonomously exploit one-day vulnerabilities,\" arXiv preprint arXiv:2404.08144, vol. 13, p. 14, 2024.", + "[809] R. Fang, R. Bindu, A. Gupta, Q. Zhan, and D. Kang, \"Llm agents can autonomously hack websites,\" arXiv preprint arXiv:2402.06664, 2024.", + "[810] W. Cheng, K. Sun, X. Zhang, and W. Wang, \"Security attacks on llm-based code completion tools,\" arXiv preprint arXiv:2408.11006, 2024.", + "[811] X. Fu, Z. Wang, S. Li, R. K. Gupta, N. Mireshghallah, T. Berg-Kirkpatrick, and E. Fernandes, \"Misusing tools in large language models with visual adversarial examples,\" arXiv preprint arXiv:2310.03185, 2023.", + "[812] X. Fu, S. Li, Z. Wang, Y. Liu, R. K. Gupta, T. Berg-Kirkpatrick, and E. Fernandes, \"Imprompter: Tricking llm agents into improper tool use,\" arXiv preprint arXiv:2410.14923, 2024.", + "[813] B. Zhang, Y. Tan, Y. Shen, A. Salem, M. Backes, S. Zannettou, and Y. 
Zhang, \"Breaking agents: Compromising autonomous llm agents through malfunction amplification,\" arXiv preprint arXiv:2407.20859, 2024.", + "[814] H. Wang, R. Zhang, J. Wang, M. Li, Y. Huang, D. Wang, and Q. Wang, \"From allies to adversaries: Manipulating llm tool-calling through adversarial injection,\" arXiv preprint arXiv:2412.10198, 2024.", + "[815] W. Yang, X. Bi, Y. Lin, S. Chen, J. Zhou, and X. Sun, \"Watch out for your agents! investigating backdoor threats to lvm-based agents,\" Advances in Neural Information Processing Systems, vol. 37, pp. 100938-100964, 2024.", + "[816] P. Zhu, Z. Zhou, Y. Zhang, S. Yan, K. Wang, and S. Su, \"Demonagent: Dynamically encrypted multi-backdoor implantation attack on llm-based agent,\" arXiv preprint arXiv:2502.12575, 2025.", + "[817] Y. Wang, D. Xue, S. Zhang, and S. Qian, \"Badagent: Inserting and activating backdoor attacks in llm agents,\" arXiv preprint arXiv:2406.03007, 2024.", + "[818] Z. Jiang, M. Li, G. Yang, J. Wang, Y. Huang, Z. Chang, and Q. Wang, \"Mimicking the familiar: Dynamic command generation for information theft attacks in llm tool-learning system,\" arXiv preprint arXiv:2502.11358, 2025.", + "[819] W. Zhao, V. Khazanchi, H. Xing, X. He, Q. Xu, and N. D. Lane, \"Attacks on third-party apis of large language models,\" arXiv preprint arXiv:2404.16891, 2024.", + "[820] J. Chen and S. L. Cong, \"Agentguard: Repurposing" + ], + "bbox": [ + 506, + 53, + 921, + 941 + ], + "page_idx": 63 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 63 + }, + { + "type": "page_number", + "text": "64", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 63 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "agentric orchestrator for safety evaluation of tool orchestration,\" arXiv preprint arXiv:2502.09809, 2025.", + "[821] X. Zhang, H. Xu, Z. Ba, Z. Wang, Y. Hong, J. Liu, Z. Qin, and K. 
Ren, \"Privacyasst: Safeguarding user privacy in tool-using large language model agents,\" IEEE Transactions on Dependable and Secure Computing, 2024.", + "[822] Z. Xiang, L. Zheng, Y. Li, J. Hong, Q. Li, H. Xie, J. Zhang, Z. Xiong, C. Xie, C. Yang et al., \"Guardagent: Safeguard llm agents by a guard agent via knowledge-enabled reasoning,\" arXiv preprint arXiv:2406.09187, 2024.", + "[823] Y. Gao, Y. Xiong, X. Gao, K. Jia, J. Pan, Y. Bi, Y. Dai, J. Sun, H. Wang, and H. Wang, \"Retrieval-augmented generation for large language models: A survey,\" arXiv preprint arXiv:2312.10997, vol. 2, 2023.", + "[824] P. Zhao, H. Zhang, Q. Yu, Z. Wang, Y. Geng, F. Fu, L. Yang, W. Zhang, J. Jiang, and B. Cui, \"Retrievalaugmented generation for ai-generated content: A survey,\" arXiv preprint arXiv:2402.19473, 2024.", + "[825] C. Xiang, T. Wu, Z. Zhong, D. Wagner, D. Chen, and P. Mittal, \"Certifiably robust rag against retrieval corruption,\" arXiv preprint arXiv:2405.15556, 2024.", + "[826] Z. Chen, Z. Xiang, C. Xiao, D. Song, and B. Li, \"Agentpoison: Red-teaming llm agents via poisoning memory or knowledge bases,\" Advances in Neural Information Processing Systems, vol. 37, pp. 130-185-130-213, 2025.", + "[827] W. Zou, R. Geng, B. Wang, and J. Jia, \"Poisonedrag: Knowledge corruption attacks to retrieval-augmented generation of large language models,\" arXiv preprint arXiv:2402.07867, 2024.", + "[828] Z. Zhong, Z. Huang, A. Wettig, and D. Chen, \"Poisoning retrieval corpora by injecting adversarial passages,\" arXiv preprint arXiv:2310.19156, 2023.", + "[829] X. Gu, X. Zheng, T. Pang, C. Du, Q. Liu, Y. Wang, J. Jiang, and M. Lin, \"Agent smith: A single image can jailbreak one million multimodal llm agents exponentially fast,\" arXiv preprint arXiv:2402.08567, 2024.", + "[830] A. Li, Y. Zhou, V. C. Raghuram, T. Goldstein, and M. Goldblum, \"Commercial llm agents are already vulnerable to simple yet dangerous attacks,\" arXiv preprint arXiv:2502.08586, 2025.", + "[831] H. 
Li, M. Xu, and Y. Song, \"Sentence embedding leaks more information than you expect: Generative embedding inversion attack to recover the whole sentence,\" arXiv preprint arXiv:2305.03010, 2023.", + "[832] M. Russinovich, A. Salem, and R. Eldan, \"Great, now write an article about that: The crescendo multi-turn llm jailbreak attack,\" arXiv preprint arXiv:2404.01833, 2024.", + "[833] Y. Cheng, M. Georgopoulos, V. Cevher, and G. G. Chrysos, \"Leveraging the context through multiround interactions for jailbreaking attacks,\" arXiv preprint arXiv:2402.09177, 2024.", + "[834] A. Priyanshu and S. Vijay, \"Fractured-sorry-bench: Framework for revealing attacks in conversational turns undermining refusal efficacy and defenses over sorry-bench (automated multi-shot jailbreaks),\" arXiv preprint arXiv:2408.16163, 2024.", + "[835] D. Agarwal, A. R. Fabbri, B. Risher, P. Laban," + ], + "bbox": [ + 76, + 53, + 491, + 941 + ], + "page_idx": 64 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "S. Joty, and C.-S. Wu, \"Prompt leakage effect and defense strategies for multi-turn llm interactions,\" arXiv preprint arXiv:2404.16251, 2024.", + "[836] T. Tong, J. Xu, Q. Liu, and M. Chen, \"Securing multi-turn conversational language models from distributed backdoor triggers,\" arXiv preprint arXiv:2407.04151, 2024.", + "[837] J. Mao, F. Meng, Y. Duan, M. Yu, X. Jia, J. Fang, Y. Liang, K. Wang, and Q. Wen, \"Agentsafe: Safeguarding large language model-based multi-agent systems via hierarchical data management,\" arXiv preprint arXiv:2503.04392, 2025.", + "[838] H. Zhou, K.-H. Lee, Z. Zhan, Y. Chen, and Z. Li, \"Trustrag: Enhancing robustness and trustworthiness in rag,\" arXiv preprint arXiv:2501.00879, 2025.", + "[839] X. Xian, G. Wang, X. Bi, J. Srinivasa, A. Kundu, C. Fleming, M. Hong, and J. 
Ding, \"On the vulnerability of applying retrieval-augmented generation within knowledge-intensive application domains,\" arXiv preprint arXiv:2409.17275, 2024.", + "[840] B. Chen, G. Wang, H. Guo, Y. Wang, and Q. Yan, \"Understanding multi-turn toxic behaviors in open-domain chatbots,\" in Proceedings of the 26th International Symposium on Research in Attacks, Intrusions and Defenses, 2023, pp. 282-296.", + "[841] R. Song, M. O. Ozmen, H. Kim, A. Bianchi, and Z. B. Celik, \"Enhancing llm-based autonomous driving agents to mitigate perception attacks,\" arXiv preprint arXiv:2409.14488, 2024.", + "[842] C. H. Low, Z. Wang, T. Zhang, Z. Zeng, Z. Zhuo, E. B. Mazomenos, and Y. Jin, \"Surgraw: Multi-agent workflow with chain-of-thought reasoning for surgical intelligence,\" arXiv preprint arXiv:2503.10265, 2025.", + "[843] Z. Wang, J. Wu, C. H. Low, and Y. Jin, \"Medagent-pro: Towards multi-modal evidence-based medical diagnosis via reasoning agentic workflow,\" arXiv preprint arXiv:2503.18968, 2025.", + "[844] K. N. Jeptoo and C. Sun, \"Enhancing fake news detection with large language models through multi-agent debates,\" in CCF International Conference on Natural Language Processing and Chinese Computing. Springer, 2024, pp. 474-486.", + "[845] T. Park, \"Enhancing anomaly detection in financial markets with an llm-based multi-agent framework,\" arXiv preprint arXiv:2403.19735, 2024.", + "[846] Z. Yang, S. S. Raman, A. Shah, and S. Tellex, \"Plug in the safety chip: Enforcing constraints for llm-driven robot agents,\" in 2024 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2024, pp. 14435-14442.", + "[847] J. Zhang, C. Xu, and B. Li, \"Chatscene: Knowledge-enabled safety-critical scenario generation for autonomous vehicles,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 15459-15469.", + "[848] T. Abuelsaad, D. Akkil, P. Dey, A. Jagmohan, A. Vempaty, and R. 
Kokku, \"Agent-e: From autonomous web navigation to foundational design principles in agenti-tic systems,\" arXiv preprint arXiv:2407.13032, 2024.", + "[849] E. Debenedetti, J. Zhang, M. Balunović, L. Beurer-Kellner, M. Fischer, and F. Tramère, \"Agentdojo: A dy" + ], + "bbox": [ + 506, + 53, + 921, + 941 + ], + "page_idx": 64 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 32, + 421, + 44 + ], + "page_idx": 64 + }, + { + "type": "page_number", + "text": "65", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 64 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "namic environment to evaluate attacks and defenses for llm agents,\" arXiv preprint arXiv:2406.13352, 2024.", + "[850] Y. Sun, N. Salami Pargoo, P. Jin, and J. Ortiz, \"Optimizing autonomous driving for safety: A human-centric approach with lvm-enhanced rlhf,\" in Companion of the 2024 on ACM International Joint Conference on Pervasive and Ubiquitous Computing, 2024, pp. 76-80.", + "[851] R. Fang, R. Bindu, A. Gupta, and D. Kang, \"Llm agents can autonomously exploit one-day vulnerabilities,\" arXiv preprint arXiv:2404.08144, vol. 13, p. 14, 2024.", + "[852] Y. H. Ke, R. Yang, S. A. Lie, T. X. Y. Lim, H. R. Abdullah, D. S. W. Ting, and N. Liu, \"Enhancing diagnostic accuracy through multi-agent conversations: using large language models to mitigate cognitive bias,\" arXiv preprint arXiv:2401.14589, 2024.", + "[853] X. Mou, Z. Wei, and X. Huang, \"Unveiling the truth and facilitating change: Towards agent-based largescale social movement simulation,\" arXiv preprint arXiv:2402.16333, 2024.", + "[854] Z. Chen, J. Chen, J. Chen, and M. Sra, \"Position: Standard benchmarks fail-llm agents present overlooked risks for financial applications,\" arXiv preprint arXiv:2502.15865, 2025.", + "[855] Z. Liu, R. Zeng, D. Wang, G. Peng, J. Wang, Q. Liu, P. Liu, and W. 
Wang, \"Agents4plc: Automating closed-loop plc code generation and verification in industrial control systems using llm-based agents,\" arXiv preprint arXiv:2410.14209, 2024.", + "[856] S. Mukherjee, P. Gamble, M. S. Ausin, N. Kant, K. Aggarwal, N. Manjunath, D. Datta, Z. Liu, J. Ding, S. Busacca et al., \"Polaris: A safety-focused llm constellation architecture for healthcare,\" arXiv preprint arXiv:2403.13313, 2024.", + "[857] L. La Cava and A. Tagarelli, \"Safeguarding decentralized social media: Llm agents for automating community rule compliance,\" arXiv preprint arXiv:2409.08963, 2024.", + "[858] Y. Gan, Y. Yang, Z. Ma, P. He, R. Zeng, Y. Wang, Q. Li, C. Zhou, S. Li, T. Wang et al., \"Navigating the risks: A survey of security, privacy, and ethics threats in lmbased agents,\" arXiv preprint arXiv:2411.09523, 2024.", + "[859] Z. Deng, Y. Guo, C. Han, W. Ma, J. Xiong, S. Wen, and Y. Xiang, \"Ai agents under threat: A survey of key security challenges and future pathways,\" ACM Computing Surveys, 2024.", + "[860] R. Ye, S. Tang, R. Ge, Y. Du, Z. Yin, S. Chen, and J. Shao, \"Mas-gpt: Training llms to build llm-based multi-agent systems,\" arXiv preprint arXiv:2503.03686, 2025.", + "[861] J. Zhang, J. Xiang, Z. Yu, F. Teng, X. Chen, J. Chen, M. Zhuge, X. Cheng, S. Hong, J. Wang et al., \"Aflow: Automating agentic workflow generation,\" arXiv preprint arXiv:2410.10762, 2024.", + "[862] L. Panait and S. Luke, \"Cooperative multi-agent learning: The state of the art,\" Autonomous agents and multiagent systems, vol. 11, pp. 387-434, 2005.", + "[863] L. Hammond, A. Chan, J. Clifton, J. Hoelscher-Obermaier, A. Khan, E. McLean, C. Smith, W. Barfuss, J. Foerster, T. Gavencciak et al., \"Multi-agent risks from advanced ai,\" arXiv preprint arXiv:2502.14143, 2025." + ], + "bbox": [ + 75, + 53, + 491, + 941 + ], + "page_idx": 65 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[864] R. Xu, X. Li, S. Chen, and W. 
Xu, \"Nuclear deployed: Analyzing catastrophic risks in decision-making of autonomous llm agents,\" arXiv preprint arXiv:2502.11355, 2025.", + "[865] Z. Zhou, Z. Li, J. Zhang, Y. Zhang, K. Wang, Y. Liu, and Q. Guo, \"Corba: Contagious recursive blocking attacks on multi-agent systems based on large language models,\" arXiv preprint arXiv:2502.14529, 2025.", + "[866] Z. Tan, C. Zhao, R. Moraffah, Y. Li, Y. Kong, T. Chen, and H. Liu, \"The wolf within: Covert injection of malice into mllm societies via an mllm operative,\" arXiv preprint arXiv:2402.14859, 2024.", + "[867] M. Yu, S. Wang, G. Zhang, J. Mao, C. Yin, Q. Liu, Q. Wen, K. Wang, and Y. Wang, \"Netsafe: Exploring the topological safety of multi-agent networks,\" arXiv preprint arXiv:2410.15686, 2024.", + "[868] J.-t. Huang, J. Zhou, T. Jin, X. Zhou, Z. Chen, W. Wang, Y. Yuan, M. Sap, and M. R. Lyu, \"On the resilience of multi-agent systems with malicious agents,\" arXiv preprint arXiv:2408.00989, 2024.", + "[869] P. He, Y. Lin, S. Dong, H. Xu, Y. Xing, and H. Liu, \"Red-teaming llm multi-agent systems via communication attacks,\" arXiv preprint arXiv:2502.14847, 2025.", + "[870] Y. Tian, X. Yang, J. Zhang, Y. Dong, and H. Su, \"Evil geniuses: Delving into the safety of llm-based agents,\" arXiv preprint arXiv:2311.11855, 2023.", + "[871] A. Amayuelas, X. Yang, A. Antoniades, W. Hua, L. Pan, and W. Wang, \"Multiagent collaboration attack: Investigating adversarial attacks in large language model collaborations via debate,\" arXiv preprint arXiv:2406.14711, 2024.", + "[872] T. Ju, Y. Wang, X. Ma, P. Cheng, H. Zhao, Y. Wang, L. Liu, J. Xie, Z. Zhang, and G. Liu, \"Flooding spread of manipulated knowledge in llm-based multi-agent communities,\" arXiv preprint arXiv:2407.07791, 2024.", + "[873] G. Lin and Q. Zhao, \"Large language model sentinel: Llm agent for adversarial purification,\" arXiv preprint arXiv:2405.20770, 2024.", + "[874] Y. Zeng, Y. Wu, X. Zhang, H. Wang, and Q. 
Wu, \"Autodefense: Multi-agent llm defense against jailbreak attacks,\" arXiv preprint arXiv:2403.04783, 2024.", + "[875] S. Chern, Z. Fan, and A. Liu, \"Combating adversarial attacks with multi-agent debate,\" arXiv preprint arXiv:2401.05998, 2024.", + "[876] B. Chen, G. Li, X. Lin, Z. Wang, and J. Li, \"Blockagents: Towards byzantine-robust llm-based multi-agent coordination via blockchain,\" in Proceedings of the ACM Turing Award Celebration Conference-China 2024, 2024, pp. 187-192.", + "[877] C. Song, L. Ma, J. Zheng, J. Liao, H. Kuang, and L. Yang, \"Audit-llm: Multi-agent collaboration for log-based insider threat detection,\" arXiv preprint arXiv:2408.08902, 2024.", + "[878] S. Wang, G. Zhang, M. Yu, G. Wan, F. Meng, C. Guo, K. Wang, and Y. Wang, \"G-safeguard: A topology-guided security lens and treatment on llm-based multi-agent systems,\" arXiv preprint arXiv:2502.11127, 2025.", + "[879] Z. Wu, S. Pan, F. Chen, G. Long, C. Zhang, and S. Y. Philip, \"A comprehensive survey on graph neural networks,\" IEEE transactions on neural networks and" + ], + "bbox": [ + 506, + 53, + 921, + 941 + ], + "page_idx": 65 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 65 + }, + { + "type": "page_number", + "text": "66", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 65 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "learning systems, vol. 32, no. 1, pp. 4-24, 2020.", + "[880] X. Zheng, Y. Wang, Y. Liu, M. Li, M. Zhang, D. Jin, P. S. Yu, and S. Pan, \"Graph neural networks for graphs with heterophily: A survey,\" arXiv preprint arXiv:2202.07082, 2022.", + "[881] M. R. Genesereth and S. P. Ketchpel, \"The kqml protocol: A specification of language and communication,\" in Proceedings of the Third International Conference on Information and Knowledge Management (CIKM). ACM, 1993, pp. 1-10.", + "[882] D. S. Milojicic, M. Breugst, I. 
Busse, J. Campbell, S. Covaci, B. Friedman, K. Kosaka, D. B. Lange, K. Ono, M. Oshima, C. Tham, S. Virdhagriswaran, and J. White, \"Masif: The omg mobile agent system interoperability facility,\" in Proceedings of the Second International Workshop on Mobile Agents, ser. MA '98. Berlin, Heidelberg: Springer-Verlag, 1998, p. 50-67.", + "[883] F. for Intelligent Physical Agents, \"Fipa communicative act library specification,\" https://www.fipa.org/specs/fipa00037/SC00037J.html, 2000.", + "[884] F. Curbera, M. Duftler, R. Khalaf, W. Nagy, N. Mukhi, and S. Weerawarana, \"Web services: Why and how,\" IBM Systems Journal, vol. 41, no. 2, pp. 170-177, 2002.", + "[885] G. Hohpe and B. Woolf, Enterprise Integration Patterns: Designing, Building, and Deploying Messaging Solutions, ser. Addison-Wesley Signature Series (Fowler). Addison-Wesley Professional, 2006.", + "[886] P. Lewis, E. Perez, A. Piktus, F. Petroni, V. Karpukhin, N. Goyal, H. Kuttler, M. Lewis, W.-t. Yih, T. Rocktäschel et al., \"Retrieval-augmented generation for knowledge-intensive nlp tasks,\" Advances in neural information processing systems, vol. 33, pp. 9459-9474, 2020.", + "[887] G. Izacard and E. Grave, \"Towards an efficient pipeline for knowledge-intensive nlp tasks,\" arXiv preprint arXiv:2112.04426, 2021.", + "[888] H. Chase, \"Langchain: Build applications with llms through composability,\" https://github.com/ langchain-ai/langchain, 2022, accessed: Apr. 2025.", + "[889] J. Wu et al., \"Llamaindex: Connecting llms to your knowledge,\" https://github.com/jerryjliu/llama_index, 2023, accessed: Apr. 2025.", + "[890] OpenAI, \"Function calling in openerai models,\" https://platform.openai.com/docs/guides/functions, 2023, accessed: Apr. 2025.", + "[891] Anthropic, \"Model context protocol,\" 2024, accessed: 2025-04-19. [Online]. Available: https://www.anthropic.com/news/model-context-protocol", + "[892] Google, \"A2a: Agent2agent protocol,\" 2025, accessed: 2025-04-21. [Online]. 
Available: https://github.com/google/A2A", + "[893] G. Chang, \"Anp: Agent network protocol,\" 2024, accessed: 2025-04-21. [Online]. Available: https://www(agent-network-protocol.com/", + "[894] WildCardAI, \"agents.json specification,\" https://github.com/wild-card-ai/agents.json, 2025, accessed: 2025-04-22.", + "[895] NEAR, \"Aitp: Agent interaction & transaction protocol,\" 2025, accessed: 2025-04-22. [Online]. Available: https://aitp.dev/", + "[896] L. F. Al and L. Data, \"Acp: Agent communication pro" + ], + "bbox": [ + 76, + 53, + 491, + 943 + ], + "page_idx": 66 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "tocol,\" 2025, accessed: 2025-04-22. [Online]. Available: https://github.com/orgs/i-am-bee/discussions/284", + "[897] G. Cisco, Langchain, \"Acp: Agent connect protocol,\" 2025, accessed: 2025-04-22. [Online]. Available: https://spec.acp.agntcy.org/", + "[898] S. Marro, E. L. Malfa, J. Wright, G. Li, N. Shadbolt, M. Wooldridge, and P. Torr, \"A scalable communication protocol for networks of large language models,\" 2024. [Online]. Available: https://arxiv.org/abs/2410.11905", + "[899] Eclipse, \"Language model operating system (lmos),\" https://eclipse.dev/lmos/, 2025, accessed: 2025-04-22.", + "[900] AlEngineerFoundation, \"Agent protocol,\" https://agentprotocol.ai/, 2025, accessed: 2025-04-22.", + "[901] R. Ranjan, S. Gupta, and S. N. Singh, \"Loka protocol: A decentralized framework for trustworthy and ethical ai agent ecosystems,\" 2025. [Online]. Available: https://arxiv.org/abs/2504.10915", + "[902] A. Srinivasan, K. Bania, S. V, H. Mestha, and S. Liu, \"Implementation and application of an intelligibility protocol for interaction with an llm,\" 2024. [Online]. Available: https://arxiv.org/abs/2410.20600", + "[903] I. Bae, J. Lee, and H.-G. Jeon, \"Continuous locomotive crowd behavior generation,\" 2025. [Online]. Available: https://arxiv.org/abs/2504.04756", + "[904] L. Gąsieniec, Łukasz Kuszner, E. Latif, R. 
Parasuraman, P. Spirakis, and G. Stachowiak, \"Anonymous distributed localisation via spatial population protocols,\" 2024. [Online]. Available: https://arxiv.org/abs/2411.08434", + "[905] J. Tu, T. Wang, J. Wang, S. Manivasagam, M. Ren, and R. Urtasun, \"Adversarial attacks on multi-agent communication,\" in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2021, pp. 7768-7777.", + "[906] L. Yuan, F. Chen, Z. Zhang, and Y. Yu, \"Communication-robust multi-agent learning by adaptable auxiliary multi-agent adversary generation,\" Frontiers of Computer Science, vol. 18, no. 6, p. 186331, 2024.", + "[907] J. Blumenkamp and A. Prorok, \"The emergence of adversarial communication in multi-agent reinforcement learning,\" in Conference on Robot Learning. PMLR, 2021, pp. 1394-1414.", + "[908] Z. Chen, Z. Xiang, C. Xiao, D. Song, and B. Li, \"Agent-poison: Red-teaming llm agents via poisoning memory or knowledge bases,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems.", + "[909] X. Pan, J. Dai, Y. Fan, and M. Yang, \"Frontier ai systems have surpassed the self-replicating red line,\" arXiv preprint arXiv:2412.12140, 2024.", + "[910] L. Yu, Y. Qiu, Q. Yao, Y. Shen, X. Zhang, and J. Wang, \"Robust communicative multi-agent reinforcement learning with active defense,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 16, 2024, pp. 17575-17582.", + "[911] J. Light, M. Cai, S. Shen, and Z. Hu, \"Avalonbench: Evaluating llms playing the game of avalon,\" arXiv preprint arXiv:2310.05036, 2023.", + "[912] Q. Xie, Q. Feng, T. Zhang, Q. Li, L. Yang, Y. Zhang," + ], + "bbox": [ + 506, + 53, + 921, + 943 + ], + "page_idx": 66 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 66 + }, + { + "type": "page_number", + "text": "67", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 66 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "R. Feng, L. He, S. Gao, and Y. Zhang, \"Human simulacra: Benchmarking the personification of large language models,\" arXiv preprint arXiv:2402.18180, 2024.", + "[913] L. Geng and E. Y. Chang, \"Realm-bench: A real-world planning benchmark for llms and multi-agent systems,\" arXiv preprint arXiv:2502.18836, 2025.", + "[914] Y. Dubois, B. Galambosi, P. Liang, and T. B. Hashimoto, \"Length-controlled alpacaeval: A simple way to debias automatic evaluators,\" arXiv preprint arXiv:2404.04475, 2024.", + "[915] W. Wang, J. Shi, C. Wang, C. Lee, Y. Yuan, J.-T. Huang, and M. R. Lyu, \"Learning to ask: When llms meet unclear instruction,\" ArXiv, vol. abs/2409.00557, 2024. [Online]. Available: https://api-semanticscholar.org/CorpusID:272368496", + "[916] C. Guo, X. Liu, C. Xie, A. Zhou, Y. Zeng, Z. Lin, D. Song, and B. Li, \"Redcode: Risky code execution and generation benchmark for code agents,\" Advances in Neural Information Processing Systems, vol. 37, pp. 106-190-106-236, 2024.", + "[917] X. Yuan, J. Li, D. Wang, Y. Chen, X. Mao, L. Huang, H. Xue, W. Wang, K. Ren, and J. Wang, \"S-eval: Automatic and adaptive test generation for benchmarking safety evaluation of large language models,\" arXiv preprint arXiv:2405.14191, 2024.", + "[918] D. Dorn, A. Variengien, C.-R. Segerie, and V. Corruble, \"Bells: A framework towards future proof benchmarks for the evaluation of llm safeguards,\" arXiv preprint arXiv:2406.01364, 2024.", + "[919] Y. Shao, T. Li, W. Shi, Y. Liu, and D. Yang, \"Privacylens: Evaluating privacy norm awareness of language models in action,\" arXiv preprint arXiv:2409.00138, 2024.", + "[920] Q. Zhan, Z. Liang, Z. Ying, and D. 
Kang, \"Injecagent: Benchmarking indirect prompt injections in tool-integrated large language model agents,\" arXiv preprint arXiv:2403.02691, 2024.", + "[921] Z. Zhu, B. Wu, Z. Zhang, and B. Wu, \"Riskawarebench: Towards evaluating physical risk awareness for high-level planning of llm-based embodied agents,\" arXiv e-prints, pp. arXiv-2408, 2024.", + "[922] Z. Zhang, S. Cui, Y. Lu, J. Zhou, J. Yang, H. Wang, and M. Huang, \"Agent-safetybench: Evaluating the safety of llm agents,\" arXiv preprint arXiv:2412.14470, 2024.", + "[923] M. Andriushchenko, A. Souly, M. Dziemian, D. Duenas, M. Lin, J. Wang, D. Hendrycks, A. Zou, Z. Kolter, M. Fredrikson et al., \"Agentharm: A benchmark for measuring harmfulness of llm agents,\" arXiv preprint arXiv:2410.09024, 2024.", + "[924] J. Ye, S. Li, G. Li, C. Huang, S. Gao, Y. Wu, Q. Zhang, T. Gui, and X. Huang, \"Toolsword: Unveiling safety issues of large language models in tool learning across three stages,\" arXiv preprint arXiv:2402.10753, 2024.", + "[925] Y. Ruan, H. Dong, A. Wang, S. Pitis, Y. Zhou, J. Ba, Y. Dubois, C. J. Maddison, and T. Hashimoto, \"Identifying the risks of lm agents with an lm-emulated sandbox,\" arXiv preprint arXiv:2309.15817, 2023.", + "[926] X. Zhou, H. Kim, F. Brahman, L. Jiang, H. Zhu, X. Lu, F. Xu, B. Y. Lin, Y. Choi, N. Mireshghallah et al., \"Haicosystem: An ecosystem for sandboxing" + ], + "bbox": [ + 75, + 53, + 491, + 943 + ], + "page_idx": 67 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "safety risks in human-ai interactions,\" arXiv preprint arXiv:2409.16427, 2024.", + "[927] S. Yin, X. Pang, Y. Ding, M. Chen, Y. Bi, Y. Xiong, W. Huang, Z. Xiang, J. Shao, and S. Chen, \"Safeagent-bench: A benchmark for safe task planning of embodied llm agents,\" arXiv preprint arXiv:2412.13178, 2024.", + "[928] J. BENCHMARK, \"Jailjudge: A comprehensive jailbreak judge benchmark with multi-agent enhanced explanation evaluation framework.\"", + "[929] P. Y. Zhong, S. Chen, R. 
Wang, M. McCall, B. L. Titzer, and H. Miller, \"Rtbas: Defending llm agents against prompt injection and privacy leakage,\" arXiv preprint arXiv:2502.08966, 2025.", + "[930] A. Liu, Y. Zhou, X. Liu, T. Zhang, S. Liang, J. Wang, Y. Pu, T. Li, J. Zhang, W. Zhou et al., \"Compromising lvm driven embodied agents with contextual backdoor attacks,\" IEEE Transactions on Information Forensics and Security, 2025.", + "[931] —, \"Compromising embodied agents with contextual backdoor attacks,\" arXiv preprint arXiv:2408.02882, 2024.", + "[932] H. Zhang, C. Zhu, X. Wang, Z. Zhou, S. Hu, and L. Y. Zhang, \"Badrobot: Jailbreaking llm-based embodied ai in the physical world,\" arXiv preprint arXiv:2407.20242, 2024.", + "[933] W. Shen, C. Li, H. Chen, M. Yan, X. Quan, H. Chen, J. Zhang, and F. Huang, \"Small llms are weak tool learners: A multi-llm agent,\" arXiv preprint arXiv:2401.07324, 2024.", + "[934] S. Yuan, K. Song, J. Chen, X. Tan, Y. Shen, R. Kan, D. Li, and D. Yang, \"Easytool: Enhancing llm-based agents with concise tool instruction,\" arXiv preprint arXiv:2401.06201, 2024.", + "[935] S. Wu, S. Zhao, Q. Huang, K. Huang, M. Yasunaga, K. Cao, V. Ioannidis, K. Subbian, J. Leskovec, and J. Y. Zou, \"Avatar: Optimizing llm agents for tool usage via contrastive reasoning,\" Advances in Neural Information Processing Systems, vol. 37, pp. 25981-26010, 2024.", + "[936] Z. Shen, \"Llm with tools: A survey,\" arXiv preprint arXiv:2409.18807, 2024.", + "[937] C. Qian, W. Liu, H. Liu, N. Chen, Y. Dang, J. Li, C. Yang, W. Chen, Y. Su, X. Cong et al., \"Chatdev: Communicative agents for software development,\" arXiv preprint arXiv:2307.07924, 2023.", + "[938] Z. M. Wang, Z. Peng, H. Que, J. Liu, W. Zhou, Y. Wu, H. Guo, R. Gan, Z. Ni, J. Yang et al., \"Rolellm: Benchmarking, eliciting, and enhancing role-playing abilities of large language models,\" arXiv preprint arXiv:2310.00746, 2023.", + "[939] J. Zhou, Z. Chen, D. Wan, B. Wen, Y. Song, J. Yu, Y. Huang, L. Peng, J. Yang, X. 
Xiao et al., \"Characterglm: Customizing chinese conversational ai characters with large language models,\" arXiv preprint arXiv:2311.16832, 2023.", + "[940] Z. Chen, K. Liu, Q. Wang, W. Zhang, J. Liu, D. Lin, K. Chen, and F. Zhao, \"Agent-flan: Designing data and methods of effective agent tuning for large language models,\" arXiv preprint arXiv:2403.12881, 2024.", + "[941] G. Zhang, L. Niu, J. Fang, K. Wang, L. Bai, and X. Wang, \"Multi-agent architecture search via agentic supernet,\" arXiv preprint arXiv:2502.04180, 2025." + ], + "bbox": [ + 506, + 53, + 921, + 943 + ], + "page_idx": 67 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 67 + }, + { + "type": "page_number", + "text": "68", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 67 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[942] L. P. Kaelbling, M. L. Littman, and A. W. Moore, \"Reinforcement learning: A survey,\" Journal of artificial intelligence research, vol. 4, pp. 237-285, 1996.", + "[943] Y. Li, \"Deep reinforcement learning: An overview,\" arXiv preprint arXiv:1701.07274, 2017.", + "[944] X. Li, Y. Fan, and S. Cheng, \"Aigc in china: Current developments and future outlook,\" arXiv preprint arXiv:2308.08451, 2023.", + "[945] X. Sun, L. Dong, X. Li, Z. Wan, S. Wang, T. Zhang, J. Li, F. Cheng, L. Lyu, F. Wu et al., \"Pushing the limits of chatgpt on nlp tasks,\" arXiv preprint arXiv:2306.09719, 2023.", + "[946] G. Sriramanan, S. Bharti, V. S. Sadasivan, S. Saha, P. Kattakinda, and S. Feizi, \"Llm-check: Investigating detection of hallucinations in large language models,\" Advances in Neural Information Processing Systems, vol. 37, pp. 34188-34216, 2024.", + "[947] K. Zheng, J. Chen, Y. Yan, X. Zou, and X. 
Hu, \"Reefknot: A comprehensive benchmark for relation hallucination evaluation, analysis and mitigation in multimodal large language models,\" arXiv preprint arXiv:2408.09429, 2024.", + "[948] X. Zou, Y. Wang, Y. Yan, S. Huang, K. Zheng, J. Chen, C. Tang, and X. Hu, \"Look twice before you answer: Memory-space visual retracing for hallucination mitigation in multimodal large language models,\" arXiv preprint arXiv:2410.03577, 2024.", + "[949] G. Zhou, Y. Yan, X. Zou, K. Wang, A. Liu, and X. Hu, \"Mitigating modality prior-induced hallucinations in multimodal large language models via deciphering attention causality,\" arXiv preprint arXiv:2410.04780, 2024.", + "[950] W. Wang, Z. Ma, Z. Wang, C. Wu, W. Chen, X. Li, and Y. Yuan, \"A survey of llm-based agents in medicine: How far are we from baymax?\" ArXiv, vol. abs/2502.11211, 2025. [Online]. Available: https://api.sementicscholar.org/CorpusID:276408182", + "[951] H. Kang and X.-Y. Liu, \"Deficiency of large language models in finance: An empirical examination of hallucination,\" arXiv preprint arXiv:2311.15548, 2023.", + "[952] L. Ouyang, J. Wu, X. Jiang, D. Almeida, C. L. Wainwright, P. Mishkin, C. Zhang, S. Agarwal, K. Slama, A. Ray, J. Schulman, J. Hilton, F. Kelton, L. Miller, M. Simens, A. Askell, P. Welinder, P. F. Christiano, J. Leike, and R. Lowe, \"Training language models to follow instructions with human feedback,\" in NeurIPS, 2022.", + "[953] Y. Liu, Y. Yao, J.-F. Ton, X. Zhang, R. Guo, H. Cheng, Y. Klochkov, M. F. Taufiq, and H. Li, \"Trustworthy llms: a survey and guideline for evaluating large language models' alignment,\" 2024.", + "[954] M. Hao, H. Li, H. Chen, P. Xing, G. Xu, and T. Zhang, \"Iron: Private inference on transformers,\" Advances in neural information processing systems, vol. 35, pp. 15718-15731, 2022.", + "[955] J. Huang, J.-T. Huang, Z. Liu, X. Liu, W. Wang, and J. Zhao, \"Vlms as geoguessr masters: Exceptional performance, hidden biases, and privacy risks,\" ArXiv, vol. 
abs/2502.11163, 2025. [Online]. Available: https://api.sementicscholar.org/CorpusID:276409319", + "[956] G. Feretzakis and V. S. Verykios, \"Trustworthy ai:" + ], + "bbox": [ + 76, + 53, + 491, + 943 + ], + "page_idx": 68 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Securing sensitive data in large language models,\" AI, vol. 5, no. 4, pp. 2773-2800, 2024.", + "[957] Q. Feng, S. R. Kasa, H. Yun, C. H. Teo, and S. B. Bodapati, \"Exposing privacy gaps: Membership inference attack on preference data for llm alignment,\" arXiv preprint arXiv:2407.06443, 2024.", + "[958] N. Rahman and E. Santacana, “Beyond fair use: Legal risk evaluation for training llms on copyrighted text,” in ICML Workshop on Generative AI and Law, 2023.", + "[959] J. Guo, Y. Li, R. Chen, Y. Wu, C. Liu, Y. Chen, and H. Huang, \"Towards copyright protection for knowledge bases of retrieval-augmented language models via ownership verification with reasoning,\" arXiv preprint arXiv:2502.10440, 2025.", + "[960] S. Shao, Y. Li, H. Yao, Y. He, Z. Qin, and K. Ren, \"Explanation as a watermark: Towards harmless and multi-bit model ownership verification via watermarking feature attribution,\" in NDSS, 2025.", + "[961] W. Xu, K. Gao, H. He, and M. Zhou, \"Licoeval: Evaluating llms on license compliance in code generation,\" arXiv preprint arXiv:2408.02487, 2024.", + "[962] W. Qu, W. Zheng, T. Tao, D. Yin, Y. Jiang, Z. Tian, W. Zou, J. Jia, and J. Zhang, \"Provably robust multi-bit watermarking for ai-generated text,\" arXiv preprint arXiv:2401.16820, 2024.", + "[963] J. Kirchenbauer, J. Geiping, Y. Wen, J. Katz, I. Miers, and T. Goldstein, \"A watermark for large language models,\" in International Conference on Machine Learning. PMLR, 2023, pp. 17061-17084.", + "[964] J. Ye, Y. Wang, Y. Huang, D. Chen, Q. Zhang, N. Moniz, T. Gao, W. Geyer, C. Huang, P.-Y. Chen et al., \"Justice or prejudice? 
quantifying biases in llm-as-a-judge,\" arXiv preprint arXiv:2410.02736, 2024.", + "[965] Y. Wan, W. Wang, P. He, J. Gu, H. Bai, and M. R. Lyu, \"Biasaker: Measuring the bias in conversational ai system,\" Proceedings of the 31st ACM Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering, 2023. [Online]. Available: https://api-semanticscholar.org/CorpusID:258833296", + "[966] European Union, \"Artificial intelligence act,\" 2024, accessed: 2025-03-07. [Online]. Available: https://artificialintelligenceact.eu/", + "[967] Cyberspace Administration of China, \"Interim measures for the management of generative artificial intelligence services,\" 2023, accessed: 2025-03-07. [Online]. Available: https://www.cac.gov.cn/2023-07/13/c_1690898327029107.htm", + "[968] The White House, \"Safe, secure, and trustworthy development and use of artificial intelligence,\" 2023, accessed: 2025-03-07." + ], + "bbox": [ + 506, + 53, + 921, + 797 + ], + "page_idx": 68 + }, + { + "type": "header", + "text": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015", + "bbox": [ + 73, + 31, + 421, + 44 + ], + "page_idx": 68 + }, + { + "type": "page_number", + "text": "69", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 68 + } +] \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15585/c8803fe7-d918-414b-a5e2-cfaba643acbf_model.json b/data/2025/2504_15xxx/2504.15585/c8803fe7-d918-414b-a5e2-cfaba643acbf_model.json new file mode 100644 index 0000000000000000000000000000000000000000..88d57f36713cd64f3b06f3beba8889040c7f635b --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/c8803fe7-d918-414b-a5e2-cfaba643acbf_model.json @@ -0,0 +1,19104 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.423, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "1" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.285, + 0.058, + 0.712 + ], + "angle": 270, + "content": "arXiv:2504.15585v4 [cs.CR] 9 Jun 2025" + }, + { + "type": "title", + "bbox": [ + 0.103, + 0.066, + 0.895, + 0.138 + ], + "angle": 0, + "content": "A Comprehensive Survey in LLM(-Agent) Full Stack Safety: Data, Training and Deployment" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.158, + 0.923, + 0.416 + ], + "angle": 0, + "content": "Kun Wang\\*1,2, Guibin Zhang\\*3, Zhenhong Zhou†4, Jiahao Wu†5,6, Miao Yu7, Shiqian Zhao1, Chenlong Yin8, Jinhu Fu9, Yibo Yan10,11, Hanjun Luo12, Liang Lin13, Zhihao Xu14, Haolang Lu1, Xinye Cao1, Xinyun Zhou1, Weifei Jin1, Fanci Meng7, Shicheng Xu15, Junyuan Mao3, Yu Wang16, Hao Wu17, Minghe Wang12, Fan Zhang18, Junfeng Fang3, Wenjie Qu3, Yue Liu3, Chengwei Liu1, Yifan Zhang19, Qiankun Li7, Chongye Guo20,21, Yalan Qin20,21, Zhaoxin Fan22, Kai Wang3, Yi Ding1, Donghai Hong23, Jiaming Ji23, Yingxin Lai24, Zitong Yu24, Xinfeng Li1, Yifan Jiang25, Yanhui Li12, Xinyu Deng12, Junlin Wu12, Dongxia Wang12, Yihao Huang1, Yufei Guo23, Jen-tse Huang26, Qiufeng Wang27, Xiaolong Jin45, Wenxuan Wang14, Dongrui Liu21, Yanwei Yue23, Wenke Huang29, Guancheng Wan30, Heng Chang46, Tianlin Li1, Yi Yu1, Chenghao Li31, Jiawei Li33, Lei Bai21, Jie Zhang4, Qing Guo4, Jingyi Wang12, Tianlong Chen32, Joey Tianyi Zhou4, Xiaojun Jia1, Weisong Sun1, Cong Wu34, Jing Chen29, Xuming Hu10,11, Yiming Li1, Xiao Wang35, Ningyu Zhang12, Luu Anh Tuan1, Guowen Xu31, Jiaheng Zhang3, Tianwei Zhang1, Xingjun Ma37, Jindong Gu38, Liang Pang15, Xiang Wang7, Bo An1, Jun Sun36, Mohit Bansal32, Shirui Pan28, Lingjuan Lyu40, Yuval Elovici41, Bhavya Kailkhura42, Yaodong Yang23, Hongwei Li31, Wenyuan Xu12, Yizhou Sun30, Wei Wang30, Qing Li5, Ke Tang6, Yu-Gang Jiang37, Felix Juefei-Xu43, Hui Xiong10,11, Xiaofeng Wang46, Dacheng Tao1, 
Philip S. Yu44, Qingsong Wen2, Yang Liu1" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.426, + 0.923, + 0.682 + ], + "angle": 0, + "content": "\\(^{1}\\)Nanyang Technological University, \\(^{2}\\)Squirrel AI Learning, \\(^{3}\\)National University of Singapore, \\(^{4}\\)A*STAR, \\(^{5}\\)The Hong Kong Polytechnic University, \\(^{6}\\)Southern University of Science and Technology, \\(^{7}\\)University of Science and Technology of China, \\(^{8}\\)The Pennsylvania State University, \\(^{9}\\)TeleAI, \\(^{10}\\)Hong Kong University of Science and Technology (Guangzhou), \\(^{11}\\)Hong Kong University of Science and Technology, \\(^{12}\\)Zhejiang University, \\(^{13}\\)Institute of Information Engineering, Chinese Academy of Sciences, \\(^{14}\\)Renmin University of China, \\(^{15}\\)Institute of Computing Technology, Chinese Academy of Sciences, \\(^{16}\\)University of California, San Diego, \\(^{17}\\)Tencent, \\(^{18}\\)Georgia Institute of Technology, \\(^{19}\\)Institute of Automation, Chinese Academy of Sciences, \\(^{20}\\)Shanghai University, \\(^{21}\\)Shanghai AI Laboratory, \\(^{22}\\)Beihang University, \\(^{23}\\)Peking University, \\(^{24}\\)Great Bay University, \\(^{25}\\)University of Southern California, \\(^{26}\\)Johns Hopkins University, \\(^{27}\\)Southeast University, \\(^{28}\\)Griffith University, \\(^{29}\\)Wuhan University, \\(^{30}\\)University of California, Los Angeles, \\(^{31}\\)University of Electronic Science and Technology of China, \\(^{32}\\)The University of North Carolina at Chapel Hill, \\(^{33}\\)Tsinghua University, \\(^{34}\\)The University of Hong Kong, \\(^{35}\\)University of Washington, \\(^{36}\\)Singapore Management University, \\(^{37}\\)Fudan University, \\(^{38}\\)University of Oxford, \\(^{39}\\)New York University, \\(^{40}\\)Sony, \\(^{41}\\)Ben Gurion University, \\(^{42}\\)Lawrence Livermore National Laboratory, \\(^{43}\\)New York University, \\(^{44}\\)University of Illinois at Chicago, 
\\(^{45}\\)Purdue University, \\(^{46}\\)ACM Member" + }, + { + "type": "text", + "bbox": [ + 0.106, + 0.706, + 0.89, + 0.93 + ], + "angle": 0, + "content": "Abstract—The remarkable success of Large Language Models (LLMs) has illuminated a promising pathway toward achieving Artificial General Intelligence for both academic and industrial communities, owing to their unprecedented performance across various applications. As LLMs continue to gain prominence in both research and commercial domains, their security and safety implications have become a growing concern, not only for researchers and corporations but also for all nations. Currently, existing surveys on LLM safety primarily focus on specific stages of the LLM lifecycle, e.g., deployment phase or fine-tuning phase, lacking a comprehensive understanding of the entire \"lifechain\" of LLMs. To address this gap, this paper introduces, for the first time, the concept of \"full-stack\" safety to systematically consider safety issues throughout the entire process of data, training (pre-training, post-training), deployment (deployment and final commercialization). Compared to the off-the-shelf LLM safety surveys, our work demonstrates several distinctive advantages: (I) Comprehensive Perspective. We define the complete LLM lifecycle as encompassing data preparation, pre-training, post-training (including alignment and fine-tuning, model editing, etc.), deployment and final commercialization. To our knowledge, this represents the first safety survey to encompass the entire lifecycle of LLMs. (II) Extensive Literature Support. Our research is grounded in an exhaustive review of over \\(900+\\) papers, ensuring comprehensive coverage and systematic organization of safety issues within a more holistic understanding. (III) Unique Insights. Through systematic literature analysis, we develop reliable roadmaps and perspectives for each chapter. 
Our work identifies promising research directions, including safety in data generation, alignment techniques, model editing, and LLM-based agent systems. These insights provide valuable guidance for researchers pursuing future work in this field. We provide an up-to-date review of the literature on LLM (agent) safety at https://github.com/bingreeky/full-stack-llm-safety, which can be considered a useful support for both researchers and engineers." + }, + { + "type": "footer", + "bbox": [ + 0.106, + 0.942, + 0.863, + 0.957 + ], + "angle": 0, + "content": "Index Terms—Large Language Model, LLM-based Agent, Safety, Post-training, Alignment, Model Editing, Unlearning, Evaluation" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "2" + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.053, + 0.231, + 0.067 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.099, + 0.493, + 0.26 + ], + "angle": 0, + "content": "The emergence and success of large language models (LLMs) [1, 2, 3, 4, 5] have greatly transformed the modes of production in both academia and industry [6, 7, 8, 9, 10, 11, 12, 13], opening a potential path for the upcoming artificial general intelligence [14, 15, 16]. Going beyond this, LLMs, by integrating tools [17, 18, 19, 20], memory [21, 22, 23, 24], APIs [25, 26], and by constructing single-agent or multiagent systems with other LLMs, provide powerful tools for large models to perceive, understand, and change the environment [27, 28, 29, 30]. This has garnered considerable attention for embodied intelligence [31, 32]." 
+ }, + { + "type": "text", + "bbox": [ + 0.072, + 0.263, + 0.493, + 0.44 + ], + "angle": 0, + "content": "Unfortunately, the entire lifecycle of LLMs is constantly confronted with security and safety issues [33, 34, 35, 36, 37]. During the data preparation phase, since LLMs require ample and diverse data, and a significant amount of data is sourced from the Internet and other open-source scenarios, the toxicity in the data and user privacy may seep into the model parameters, triggering crises in the model [38, 39, 40]. The pretraining process of the model, due to its unsupervised nature, unconsciously absorbs these toxic data and privacy information, thereby causing the model's \"genetic makeup\" to carry dangerous characteristics and privacy issues [41, 42, 43, 44]." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.443, + 0.492, + 0.604 + ], + "angle": 0, + "content": "Before the model is deployed, if it is not properly aligned with security measures, it can easily deviate from human values [45, 46]. Meanwhile, to make the model more \"specialized,\" the fine-tuning process will employ safer and more customized data to ensure the model performs flawlessly in specific domains [47, 48, 49, 50]. The model deployment process also involves issues such as jailbreak attacks and corresponding defense measures [51, 52, 53], especially for LLM-based agents [54]. These agents may become contaminated due to their interaction with tools, memory, and the environment [55, 56, 57, 58]." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.608, + 0.493, + 0.829 + ], + "angle": 0, + "content": "Previous surveys on LLMs have primarily focused on the research aspects of LLM itself, often overlooking detailed discussions on LLM safety [7, 34] and in-depth exploration of trustworthiness issues [75]. 
Meanwhile, off-the-shelf surveys that do address LLM safety tend to concentrate on various trustworthiness concerns or are limited to a single phase of the LLM lifecycle [33, 76, 77], such as the deployment stage and fine-tuning stage. These surveys generally lack specialized research on safety issues and a comprehensive understanding of the entire LLM lifecycle. Table 1 summarizes the differences between our survey and previous surveys. Upon reviewing the aforementioned survey and systematically investigating the related literature, we conclude that our survey endeavors to address several questions that existing surveys have not covered:" + }, + { + "type": "table_caption", + "bbox": [ + 0.533, + 0.049, + 0.896, + 0.08 + ], + "angle": 0, + "content": "TABLE 1: Survey Comparison on LLMs and Agents settings." + }, + { + "type": "table", + "bbox": [ + 0.508, + 0.089, + 0.924, + 0.416 + ], + "angle": 0, + "content": "
SurveyObjectStage*
\\( LLM^‡ \\)\\( Agent^§ \\)DataPTEditFTDepEval
Year 2023
Zhao et al. [6]S+M-X
Liang et al. [59]M-XX
Chang et al. [7]S+M-XXXX
Zhang et al. [60]S+M-XXX
Wang et al. [28]-SXXXX
Zhao et al. [61]S-XXX
Xi et al. [29]-S+MASXXXX
Shen et al. [62]S-XXX
Raijan et al. [63]S-XXXX
Kalyan et al. [64]S+M-XX
Huang et al. [51]S-XXX
Shayegani et al. [65]S+MMASXXXX
Yao et al. [66]S-XXXX
Year 2024
Guo et al. [27]-S+MASXXXX
Qin et al. [67]S+M-XX
Hadi et al. [68]S-XXX
Sun et al. [69]S+MSXXX
Das et al. [70]S-XXXX
He et al. [71]-S+M+MASXXXXX
Wang et al. [54]-S+MASXXXXX
Year 2025
Tie et al. [72]S+M-XXX
Ma et al. [33]S+MS+MXX
Huang et al. [73]S+MS+MXX
Yu et al. [74]SS+MASXXXX
Chen et al. [36]S-XX
OursS+MS+M+MAS
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.507, + 0.416, + 0.912, + 0.427 + ], + "angle": 0, + "content": "\\(\\ddagger\\) : Single-modal LLM (S), Multi-modal LLM (M)." + }, + { + "type": "table_footnote", + "bbox": [ + 0.507, + 0.427, + 0.912, + 0.438 + ], + "angle": 0, + "content": "\\(\\S\\) : Single-modal Agent (S), Multi-modal Agent (M), Multi-agent System (MAS)." + }, + { + "type": "table_footnote", + "bbox": [ + 0.507, + 0.438, + 0.896, + 0.448 + ], + "angle": 0, + "content": "\\(\\star\\) : Pre-training (PT), Fine-tuning (FT), Deployment (Dep), Evaluation (Eval)." + }, + { + "type": "list", + "bbox": [ + 0.507, + 0.416, + 0.912, + 0.448 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.53, + 0.481, + 0.559, + 0.499 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.56, + 0.489, + 0.899, + 0.519 + ], + "angle": 0, + "content": "What aspects should the safety of large models be compass?" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.536, + 0.925, + 0.799 + ], + "angle": 0, + "content": "Contribution 1. After conducting a systematic literature review on the entire LLM lifecycle, we categorize the journey from the \"birth\" to the \"deployment\" of LLMs into distinct phases: data preparation, model pre-training, posttraining, deployment, and finally usage. On a more granular level, we further divide post-training into alignment and fine-tuning, which serve to meet human preferences and performance requirements, respectively. Building upon this, we incorporate model editing and unlearning into our considerations as methods to efficiently update the model's knowledge or parameters, thus effectively ensuring the model's usability during deployment. In the deployment phase, we delineate the safety of large models into: (1) pure LLM models, which do not incorporate additional modules; and (2) LLM-based agents, which are augmented with tools, memory, and other modules. 
This framework encompasses the entire cycle of model parameter training, convergence, and solidification." + }, + { + "type": "image", + "bbox": [ + 0.53, + 0.818, + 0.559, + 0.836 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.56, + 0.826, + 0.898, + 0.854 + ], + "angle": 0, + "content": "How to provide a clearer taxonomy and literature ew?" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.87, + 0.925, + 0.945 + ], + "angle": 0, + "content": "Contribution 2. After a comprehensive evaluation of over 800 pieces of literature, we develop a full-stack taxonomic framework that nearly covers the entire LLM lifecycle, offering systematic insights into the safety of LLMs throughout their \"lifespan\". We provide a more reliable" + }, + { + "type": "page_footnote", + "bbox": [ + 0.073, + 0.861, + 0.493, + 0.943 + ], + "angle": 0, + "content": "Kun Wang is with Nanyang Technological University (wang.kun@ntu.edu.sg), Guibin Zhang is with National University of Singapore (guibinz@outlook.com), Jiahao Wu is with The Hong Kong Polytechnic University (jiahao.wu@connect.polyu.hk), Zhenhong Zhou is with A\\*STAR (ydyjyazhh@gmail.com), Yang Liu is with Nanyang Technological University (yangliu@ntu.edu.sg). * denotes equal contribution and † denotes the corresponding authors." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "3" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.493, + 0.114 + ], + "angle": 0, + "content": "correlation analysis between each phase of the LLM timeline and other relevant sections, aiding readers in understanding the safety issues of LLMs while also clarifying the research stage of each LLM phase." 
+ }, + { + "type": "image", + "bbox": [ + 0.099, + 0.133, + 0.127, + 0.153 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.129, + 0.142, + 0.466, + 0.171 + ], + "angle": 0, + "content": "What are the potential growth areas for future M safety concerns?" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.183, + 0.492, + 0.314 + ], + "angle": 0, + "content": "Contribution 3. Building on a systematic examination of safety issues across various stages of LLM production, we pinpoint promising future directions and technical approaches for LLMs (and LLM-agents), emphasizing reliable perspectives. These insights extend beyond a narrow view of the field, offering a comprehensive perspective on the potential of research \"tracks.\" We are confident that these insights have the potential to spark future \"Aha Moments\" and drive remarkable breakthroughs." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.315, + 0.493, + 0.796 + ], + "angle": 0, + "content": "Taxonomy. Our article begins with the structural preparation of data. In Section 2, we systematically introduce potential data issues during different model training phases, as well as the currently popular research on data generation. In Section 3, we focus on the security and safety concerns during the pre-training phase, which includes two core modules: data filtering and augmenting. In Section 4, we concentrate on the post-training phase, differing from previous works by incorporating fine-tuning and alignment, which involve attack, defense, and evaluation. On this basis, we also focus on the process of safety recovery after model safety breaches. In Section 5, we observe that models require dynamic updates in real-world scenarios. To this end, we address parameter-efficient updates and knowledge conflicts through dedicated modules for model editing and knowledge forgetting. 
Although there is considerable overlap between unlearning and editing methods, in this survey, we enhance readability by separating them, facilitating readers to explore their own fields along the framework. Subsequently, in Section 6, we focus on the safety issues after the model parameters are solidified, which share many commonalities with traditional large model security surveys. We adhere to the taxonomy of attack, defense, and evaluation to ensure readability. Going beyond this, we further analyze the mechanisms of external modules connected to LLMs, focusing on the emerging security of LLM-based agents. Finally, in Section 7, we present multiple safety concerns for the commercialization and ethical guidelines, as well as user usage, of LLM-based applications. To provide readers with a comprehensive understanding of our research framework, we dedicate Section 8 to outlining promising future research directions, while Section 9 presents synthesized conclusions and broader implications." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.797, + 0.493, + 0.943 + ], + "angle": 0, + "content": "At the conclusion of each chapter, we provide a roadmap and perspective of the research content covered in the sections, to facilitate readers' clearer understanding of the technological evolution path and potential future growth areas. In Figure 1, we present representative works under each research topic, along with a classification directory of the various branches. Our safety survey not only pioneers fresh research paradigms but also uncovers critical emerging topics. By mapping security considerations throughout LLMs' complete lifecycle, we establish a standardized" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.923, + 0.083 + ], + "angle": 0, + "content": "research architecture that will guide both academic and industrial safety initiatives." 
+ }, + { + "type": "title", + "bbox": [ + 0.505, + 0.106, + 0.652, + 0.121 + ], + "angle": 0, + "content": "2 DATA SAFETY" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.128, + 0.925, + 0.333 + ], + "angle": 0, + "content": "In the first section, we begin with the data. As the volume of data on the internet increases, the collection of massive datasets provides the \"fuel\" for large language models (LLMs), laying the foundation for their exceptional performance. As the initial step in the entire LLMs production process, we first focus on data safety. Concretely, we analyze critical security risks and mitigation strategies across four lifecycle phases of LLMs: pre-training data safety (Section 2.1), fine-tuning data safety (Section 2.2) and alignment data safety (Section 2.3). Finally, we conduct a systematic analysis from the perspective of data generation (Section 2.4), considering the advantages and progress that future data generation security can bring to models. We summarize the literature on secure and reliable data generation." + }, + { + "type": "title", + "bbox": [ + 0.504, + 0.354, + 0.719, + 0.37 + ], + "angle": 0, + "content": "2.1 Pretraining Data Safety" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.374, + 0.924, + 0.534 + ], + "angle": 0, + "content": "The pretraining phase of LLMs relies heavily on massive, diverse datasets collected from the Internet [78, 79, 80] or open-source data platforms [81, 82] (e.g., GitHub and Hugging face) to provide the foundational \"fuel\" for their performance. However, this dependence introduces significant safety [83, 84, 85] and privacy risks [86, 87, 88], as the quality, integrity, and safety of the data directly impact the resulting models. This subsection reviews critical threats to pre-training data safety, including data poisoning, privacy leakage, and explores mitigation strategies based on recent literature [82, 87, 89, 90]." 
+ }, + { + "type": "text", + "bbox": [ + 0.504, + 0.535, + 0.924, + 0.827 + ], + "angle": 0, + "content": "Training Data Poisoning. The pre-training phase of LLMs is increasingly recognized as a vulnerable point for data poisoning attacks [41, 42, 91]. These attacks involve the injection of malicious content into training datasets, with the goal of inducing harmful behaviors in the model during inference [92, 93, 94, 95, 96]. Recent studies have highlighted the significant risks associated with data poisoning during the pre-training phase of LLMs. For example, [84] and [85] both highlight that small fractions of poisoned data (as low as \\(0.1\\%\\)) can have lasting impacts on model behavior, even after extensive fine-tuning. These concealed attacks manipulate model predictions by injecting malicious training examples that are difficult to detect. Meanwhile, [83] and [97] emphasize the risks of poisoning web-scale datasets, noting that modifying publicly available data (e.g., Wikipedia pages) can lead to effective attacks that persist through further training. The study by Sun et al. [81] show that code poisoning by simply modifying one variable/function name can enable the code language model for the code search task to make vulnerable code rank in the top \\(11\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.827, + 0.925, + 0.943 + ], + "angle": 0, + "content": "Privacy leakage. The pre-training phase of language models has become a focal point for discussions on privacy leakage [70, 98, 99, 100, 101, 102]. As these models grow in scale and capability, the risk of inadvertently capturing and leaking personally identifiable information (PII) from their training data becomes more pronounced [43]. 
[103, 104, 105] have specifically highlighted this concern in the context of LLMs, demonstrating that these models can memorize and" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "4" + }, + { + "type": "image", + "bbox": [ + 0.073, + 0.058, + 0.925, + 0.576 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.578, + 0.921, + 0.595 + ], + "angle": 0, + "content": "Fig. 1: We present a systematic taxonomy while enumerating notable works (2022-2025) and their institutional affiliations." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.619, + 0.491, + 0.897 + ], + "angle": 0, + "content": "reproduce sensitive information through targeted attacks. Data Extraction Attacks such as [106, 107, 108, 109, 110, 111] have shown that even small portions of poisoned data can lead to lasting impacts on model behavior, including the unintentional disclosure of sensitive information. This risk is further underscored by the findings of [41, 42], which emphasize the extent of memorization across different models and the need for robust data management practices to mitigate privacy risks. Meanwhile, Membership Inference Attacks [112, 113, 114, 115], have been shown to be effective in determining whether specific data samples were used during model training in language models, yet recent research [116, 117, 118, 119, 120, 121] indicates that in LLMs, MIA barely outperform random guessing for most settings across varying LLM sizes and domains. Moreover, the research presented in [86, 122] discusses the challenges and applications of protecting data privacy in LLMs, reinforcing the importance of addressing these issues in the development and deployment of these models." 
+ }, + { + "type": "text", + "bbox": [ + 0.072, + 0.899, + 0.492, + 0.943 + ], + "angle": 0, + "content": "Mitigation strategies against data insecurity in LLM pre-training include several key interventions. To address toxic content, custom classifiers trained on safety datasets" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.619, + 0.923, + 0.737 + ], + "angle": 0, + "content": "are employed to detect and filter pre-training data [89, 123, 124]. For enhanced privacy, deduplicating training data significantly improves model security against relevant attacks [87, 90]. Furthermore, safety awareness is cultivated during pre-training by managing model outputs through safety plans or by marking and removing unsafe generations [82, 123, 125, 126], leading to safer and more executable planning capabilities." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.738, + 0.923, + 0.945 + ], + "angle": 0, + "content": "Mitigation measures. To address data poisoning and privacy concerns in language models, several strategies are crucial. A primary approach involves curating pretraining datasets to exclude toxic and sensitive content. [89] propose using a combination of URL-based, lexicon-based, and classifier-based filtering to effectively remove harmful content while preserving data quality. Another important strategy is employing data dedduplication techniques, which can prevent model memorization of specific instances, thereby reducing privacy risks. [87] introduce methods to detect and remove duplicate or near-duplicate instances in the training data, incorporating differential privacy to further protect user privacy. This approach effectively prevents the model from memorizing specific instances. In addition, developing" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "5" + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.056, + 0.489, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.086, + 0.432, + 0.48, + 0.492 + ], + "angle": 0, + "content": "Fig. 2: LLMs encounter a wide range of data safety risks throughout their lifecycle, from the initial stages of data collection and pre-processing to model training, deployment, and ongoing updates." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.514, + 0.491, + 0.618 + ], + "angle": 0, + "content": "robust defenses against data poisoning is vital to ensure that models are less susceptible to manipulation through malicious data injection. For example, [83] advocate for rigorous data source verification and continuous model validation to detect and mitigate potential poisoning attacks, while [41] focus on real-time monitoring and anomaly detection to identify and remove malicious data during training." + }, + { + "type": "title", + "bbox": [ + 0.073, + 0.631, + 0.291, + 0.646 + ], + "angle": 0, + "content": "2.2 Fine-tuning Data Safety" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.65, + 0.491, + 0.78 + ], + "angle": 0, + "content": "Data safety in the fine-tuning stage has emerged as a critical concern in the development of LLMs, with data poisoning attacks presenting particularly sophisticated threats to LLMs [127]. Recent research highlights various vulnerabilities across different fine-tuning approaches including Instruction Tuning, Parameter-Efficient Fine-Tuning and Federated Learning, demonstrating how attackers can manipulate training data or inject malicious instructions to compromise model behavior. These risks include:" + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.782, + 0.492, + 0.944 + ], + "angle": 0, + "content": "\\(\\Rightarrow\\) Instruction Tuning Risks. 
Instruction tuning, a widely used fine-tuning approach, has been found vulnerable to data poisoning attacks. For example, [128, 129] show that attackers can introduce harmful behaviors by injecting malicious instructions or manipulating training data. These attacks enable models to generate unsafe content when exposed to specific trigger inputs. Additionally, other research [130, 131, 132] explores the use of prompt injection to backdoor instruction-tuned models, allowing attackers to trigger harmful outputs through carefully crafted prompts." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.054, + 0.923, + 0.227 + ], + "angle": 0, + "content": "Parameter-Efficient Fine-Tuning Risks. Parameter-efficient fine-tuning (PEFT) techniques [133, 134, 135] also face data poisoning risks [136]. [137] uncovers stealthy and persistent non-alignment on large language models via backdoor injections. Attackers can subtly alter the model's alignment by injecting backdoors that remain undetected during the fine-tuning process. [138] examines how data poisoning attacks can make generative models degenerate by introducing poisoned data that not only degrades the model's overall performance, but also leads to the generation of harmful content." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.228, + 0.924, + 0.449 + ], + "angle": 0, + "content": "Federated Learning Risks. Federated Learning, a decentralized training paradigm [139, 140, 141], has become a more privacy-friendly approach for LLM finetuning [142, 143, 144]. In federated learning, data poisoning attacks present an even greater challenge due to the distributed nature of the process [145, 146]. Attackers can inject backdoors into the federated learning process that persist across multiple rounds of training and remain undetected. [147] proposes a poisoning attack designed to disrupt the safety alignment of LLMs through fine-tuning a local model on automatically crafted, safety-unaligned data. 
[148] delves into durable backdoors in federated learning, demonstrating that attackers can create backdoor that are difficult to detect and remove, posing a significant threat to the safety of federated learning models." + }, + { + "type": "list", + "bbox": [ + 0.5, + 0.054, + 0.924, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.467, + 0.713, + 0.482 + ], + "angle": 0, + "content": "2.3 Alignment Data Safety" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.486, + 0.923, + 0.573 + ], + "angle": 0, + "content": "From a data-centric perspective, data poisoning attacks pose a significant threat to the integrity and reliability of LLMs by corrupting the training datasets [149, 150]. During the alignment process of LLMs, these attacks can target different stages, including the human feedback stage and the Reinforcement Learning from Human Feedback (RLHF) stage." + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.576, + 0.923, + 0.766 + ], + "angle": 0, + "content": "Human Feedback Stage. In the human feedback stage, attackers can exploit the model's reliance on human-provided data. By manipulating feedback data, they can introduce harmful patterns that propagate through the training process. Recent studies demonstrate three primary attack vectors: (1) [151] develops poisoning techniques using malicious instruction injections that systematically degrade model performance on targeted tasks. (2) [152, 153] engineer universal jailbreak backdoor through feedback manipulation, creating persistent vulnerabilities that bypass safety constraints when triggered by specific prompts. (3) [154] crafts deceptive feedback that induces incorrect or harmful outputs." + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.768, + 0.924, + 0.943 + ], + "angle": 0, + "content": "\\(\\nRightarrow\\) Reinforcement Learning from Human Feedback (RLHF) Stage. 
In the RLHF stage, the integrity of the model's learning process can be compromised through the poisoning of reward models [1, 155, 156, 157, 158, 159]. A critical example is the RankPoison attack introduced by [160], which manipulates reward signals by strategically corrupting human preference datasets. Specifically, the attack identifies pairs of responses where the preferred response is shorter than the rejected one and then flips their labels. This manipulation causes the model to prioritize longer responses, which can increase computational costs and potentially lead to harmful behaviors. This underscores" + }, + { + "type": "list", + "bbox": [ + 0.501, + 0.576, + 0.924, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.423, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "6" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.054, + 0.49, + 0.083 + ], + "angle": 0, + "content": "the importance of robust safeguards in preference data curation and reward model validation during alignment." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.106, + 0.307, + 0.121 + ], + "angle": 0, + "content": "2.4 Safety in Data Generation" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.125, + 0.493, + 0.272 + ], + "angle": 0, + "content": "The rapid expansion of LLMs has led to a looming data exhaustion crisis, where high-quality data for pretraining, post-training, and evaluation is becoming increasingly scarce. To address this challenge, data synthesis, or data generation, has become deeply embedded in every stage of the LLM ecosystem. 
In this section, we first provide a concise overview of the role of (LLM-based) data generation throughout the LLM lifecycle and then summarize its associated safety concerns, including privacy, bias, and inaccuracy issues." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.273, + 0.493, + 0.651 + ], + "angle": 0, + "content": "Data Generation in the Lifecycle of LLMs. Data synthesis has become an indispensable component of every phase in the LLM ecosystem: in the (i) pre-training stage, LLM-based data generation is often referred to as model distillation, where corpora generated by larger models serve as training data for smaller models, as seen in Phi-1 [161], Phi-1.5 [162], and AnyGPT [163], among others. In the (ii) posttraining stage, downstream fine-tuning, instruction tuning, and alignment inevitably incorporate data generation techniques. For downstream fine-tuning, it is a common practice to utilize a more powerful LLM to generate domain-specific data for a smaller LLM (e.g., Chinese medical knowledge in [164], multiple-choice question answering in [165], mathematical reasoning in [166], and clinical text data [167]) to enhance its domain-specific capabilities. It is also empirically validated that LLM-generated data (e.g., action trajectories, question-answer pairs) can be beneficial for improving the reasoning [168, 169], planning, function calling [170] abilities. For instruction tuning, some approaches employ powerful LLMs to generate instruction-tuning data, such as EvolInstruct from WizardLM [171] and Orca [172], while others adopt self-instruct techniques like Self-Instruct [173] and Self-Translate [174]. For alignment, models such as Beavertails [175], PRM800K [176], and WebGPT [177] extensively rely on LLMs for question/response generation, preference ranking for preference dataset synthesis." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.651, + 0.495, + 0.945 + ], + "angle": 0, + "content": "Safety Issues and Mitigation. 
Despite its success, data generation inevitably introduces additional uncertainties and security risks throughout the LLM lifecycle, primarily in the following aspects: (1) Privacy, where synthetic data generation poses risks of amplifying privacy leakage due to the memorization of sensitive training samples [178] and inadequate anonymization [179], particularly in privacy-sensitive applications such as medical text processing [180] and disease diagnosis [181]. (2) Bias and Fairness, as LLMs inherently exhibit societal biases [182] (e.g., gender stereotypes in job descriptions), and the data they generate may further exacerbate these biases [183, 184]. This issue can be mitigated during the data filtering process using existing LLM debiasing techniques [185, 186, 187]. (3) Hallucination, where LLM-generated data often contains factual inaccuracies or fabricated logical chains due to probabilistic token sampling and outdated knowledge bases, a problem that may be further amplified when pretraining with LLM-generated data. Potential solutions include filtering generated data using existing hallucination detection" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.925, + 0.143 + ], + "angle": 0, + "content": "techniques [188, 189]. (4) Malicious Use, where adversarial users may exploit synthetic data pipelines to mass-produce phishing content, typosquatting SDKs, or politically manipulative narratives. (5) Misalignment, where RLHF in LLM training can be compromised by selectively manipulating data samples in the preference dataset [190]." 
+ }, + { + "type": "title", + "bbox": [ + 0.505, + 0.16, + 0.727, + 0.175 + ], + "angle": 0, + "content": "2.5 Roadmap & Perspective" + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.178, + 0.731, + 0.192 + ], + "angle": 0, + "content": "2.5.1 Reliable Data Distillation" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.196, + 0.925, + 0.664 + ], + "angle": 0, + "content": "The proliferation of LLM-driven data synthesis for knowledge distillation and model self-improvement introduces critical security vulnerabilities across the entire LLM lifecycle. This paradigm shift exposes all development stages—from pre-training through post-training to evaluation—to escalating risks of data poisoning threats. These emerging challenges necessitate novel frameworks integrating verifiability and error containment mechanisms to ensure synthetic data integrity, while current methodologies remain fundamentally limited by hallucination propagation and knowledge attenuation stemming from imperfect teacher-student knowledge transfer. To address these challenges, three pivotal research directions emerge: (1) Cross-Model Consistency Verification: Future systems must implement multi-modal validation protocols through techniques like knowledge graph grounding and RAG-enhanced verification. Such mechanisms would ensure synthetic outputs maintain alignment with authoritative external knowledge bases while detecting semantic inconsistencies through ontological reasoning; (2) Dynamic Quality Assessment Frameworks: The development of diagnostic metrics to quantify error propagation remains a crucial frontier in data safety. Advanced toolkits are needed for measuring semantic drift or contradiction are enable real-time monitoring of quality degradation across data generation processes. 
(3) Heterogeneous Filtering Pipelines: While existing filtering mechanisms provide partial solutions, significant progress lies in effectively synthesizing multi-source verification signals, including human expert insight, rule-based invalidators, and model-based critics specializing in detecting nuanced factual discrepancies through contrastive learning paradigms." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.675, + 0.798, + 0.691 + ], + "angle": 0, + "content": "2.5.2 Novel Data Generation Paradigms" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.693, + 0.925, + 0.884 + ], + "angle": 0, + "content": "Emerging approaches in data generation should leverage agent-based simulation frameworks to create a self-sustaining data flywheel for LLMs. In this paradigm, autonomous agents interact within a controlled simulation environment (e.g., Github, StackOverflow) to generate, evaluate, and iteratively refine synthetic datasets with minimal human intervention. Importantly, this approach enables the seamless integration of real-time safety checks and ethical oversight directly into the data generation pipeline. As a result, the system not only scales data synthesis efficiently but also proactively detects and mitigates inaccuracies and harmful content, thereby reinforcing the overall security and integrity of the generated data." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.896, + 0.845, + 0.911 + ], + "angle": 0, + "content": "2.5.3 Advanced Data Poisoning & Depoisoning" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.913, + 0.925, + 0.945 + ], + "angle": 0, + "content": "Future poisoning techniques are anticipated to evolve in several sophisticated directions. On the poisoning front," + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.423, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "7" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.054, + 0.493, + 0.449 + ], + "angle": 0, + "content": "adversaries may go toward fragment poisoning and covert poisoning paradigms. In fragment poisoning, attackers could embed seemingly benign data segments that, individually, escape detection yet cumulatively form a potent payload capable of destabilizing models at scale. Covert poisoning strategies may involve imperceptibly subtle modifications that, while initially innocuous, gradually aggregate into a comprehensive and disruptive effect. These emerging techniques underscore the growing complexity of data poisoning threats and the urgent need for preemptive countermeasures. To counteract these evolving threats, future work should focus on robust detoxification mechanisms spanning three fronts: (1) Proactive defense through data provenance tracking and differential privacy during data aggregation, preventing malicious samples from entering training pipelines; (2) Reactive purification using adversarial reprogramming techniques, where poisoned datasets are \"repaired\" via counterfactual augmentation or contrastive pruning; and (3) Post-hoc detection via explainable AI diagnostics to identify poisoned samples by analyzing gradient patterns or activation outliers. Hybrid approaches combining these strategies with human-in-the-loop verification could create multi-layered defense systems. Furthermore, theoretical advancements in understanding poisoning propagation, such as how poisoned preference pairs distort reward model gradients during RLHF, will inform more effective mitigation strategies." 
+ }, + { + "type": "title", + "bbox": [ + 0.074, + 0.47, + 0.295, + 0.484 + ], + "angle": 0, + "content": "3 PRE-TRAINING SAFETY" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.491, + 0.491, + 0.724 + ], + "angle": 0, + "content": "In this section, we examine the safety of LLMs in the pretraining phase, covering two key dimensions: Pre-training Data Filtering (Section 3.1) and Pre-training Data Augmentation (Section 3.2). Since the pretraining phase typically does not involve active adversarial attacks, our discussion primarily focuses on both the inherent risks present in largescale corpora [2, 4, 78, 81, 82, 97, 124, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205], such as harmful content and privacy violations—and strategies for augmenting the safety of training data, including integrating safe demonstration examples [191, 206, 207, 208] and annotating toxic content to better mitigate these risks [124, 195, 207, 209]. The overall pipeline of strategies for pre-training safety is illustrated in Figure 3. Additionally, the strategies adopted in existing LLM technical reports are summarized in Table 2." + }, + { + "type": "title", + "bbox": [ + 0.073, + 0.745, + 0.355, + 0.76 + ], + "angle": 0, + "content": "3.1 Data Filtering for Pretrain Safety" + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.764, + 0.299, + 0.779 + ], + "angle": 0, + "content": "3.1.1 Heuristic based Filtering" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.782, + 0.491, + 0.943 + ], + "angle": 0, + "content": "Heuristic-based filtering, leveraging domain blacklist [78, 193, 194], keyword-based matching [191, 193] and predefined rules [2, 124, 195, 202], is one of the most widely adopted approaches to remove undesirable content before training. With most training data sourced from the Internet [211], domain blacklist provides an efficient initial safeguard by filtering predefined harmful websites and domains. 
[194] compiles a 13M unsafe domain list, while [78] aggregates a 4.6M URL blacklist targeting spam and adult content. In practice, domains with a high likelihood of containing personally identifiable information (PII) are also" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.049, + 0.923, + 0.15 + ], + "angle": 0, + "content": "TABLE 2: Strategies for Enhancing Safety in the Pre-training Stage. \\(\\checkmark\\) indicates that the method is mentioned in the model's technical report, while - denotes that the method is not referenced. \\(①\\) represents Integrating Safe Demonstration, and A denotes Annotating Toxic Content. \"Augmenting\" denotes Augmenting Training Data." + }, + { + "type": "table", + "bbox": [ + 0.507, + 0.161, + 0.924, + 0.402 + ], + "angle": 0, + "content": "
ModelData FilteringAugmentation
Heuristic-Model-Blackbox
GPT-4 [191]--
GPT-4o(mini) [124, 202]-
GPT-o1 [201]--
Llama2 [2]---
Llama3 [193]--
Yi [192]--
InternLM2 [194]--
PaLM2 [195]--A
DeepSeek-V2 [4]---
ChatGLM [196]---
Baichuan2 [203]--
Gemini [197]-
Gemini1.5 [209]-
TigerBot [206]--1
Gemma [198]--
Nemotron-4 [200, 210]--
RefinedWeb [78]---
" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.423, + 0.923, + 0.629 + ], + "angle": 0, + "content": "included in the blacklist [2, 193, 195, 202]. Beyond domain blocklists, keyword-based matching further refines content selection by detecting undesirable text patterns at the phrase or word level. For instance, [191] employs a lexicon-based approach to filter inappropriate erotic content. Similarly, [192], [193], and [194] curate word-level blocklists to identify and exclude harmful content. Given that domain blacklist and keyword-based matching might inadvertently exclude a large amount of data [194], developing heuristic-based filtering based on carefully predefined rules provides a balance between content safety and data retention. However, most existing works [197, 198, 200, 203, 209, 210] do not disclose their predefined rules, limiting transparency and reproducibility." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.639, + 0.712, + 0.655 + ], + "angle": 0, + "content": "3.1.2 Model based Filtering" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.657, + 0.924, + 0.862 + ], + "angle": 0, + "content": "Model-based filtering leverages learned representations to assess content adaptively. [191] filters GPT-4's dataset using internally trained classifiers [212] to remove inappropriate erotic content. [192] employs the Safety Scorer to remove toxic web content, such as violence, pornography, and political propaganda. [194] fine-tunes BERT on the Kaggle \"Toxic Comment Classification Challenge\" dataset and a pornography classification dataset annotated via the Perspective \\(\\mathrm{API}^1\\), using the resulting classifiers for secondary filtering to ensure safer data. Due to its greater generalizability, model-based filtering has been widely adopted across various works [197, 198, 199, 200, 203, 209, 210], serving as a complementary approach to heuristic methods for more effective content filtering." 
+ }, + { + "type": "title", + "bbox": [ + 0.505, + 0.873, + 0.685, + 0.888 + ], + "angle": 0, + "content": "3.1.3 Blackbox Filtering" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.89, + 0.923, + 0.92 + ], + "angle": 0, + "content": "Blackbox filtering mostly relies on policy-driven [4, 197, 209, 213] or API-based [124, 201, 202] methods with undisclosed" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.929, + 0.706, + 0.943 + ], + "angle": 0, + "content": "1. https://perspectiveapi.com/" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "8" + }, + { + "type": "image", + "bbox": [ + 0.107, + 0.083, + 0.473, + 0.21 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.212, + 0.473, + 0.322 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.322, + 0.473, + 0.438 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.095, + 0.44, + 0.473, + 0.564 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.114, + 0.565, + 0.473, + 0.681 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.078, + 0.712, + 0.487, + 0.757 + ], + "angle": 0, + "content": "Fig. 3: Pipeline of the Strategies for Pre-training Safety. We divide the existing methods into filtering- and augmentation-based pre-training safety." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.784, + 0.491, + 0.915 + ], + "angle": 0, + "content": "filtering criteria and implementation details. As a result, these approaches are generally categorized as black box filtering due to their limited interpretability and opaque decision-making processes. 
Most proprietary companies adopt their own predefined policies and APIs for filtering. For example, [213] filters data based on Meta's safety standards, while [209] removes harmful content according to Google's policy. [124, 201, 202] use the Moderation \\(\\mathrm{API}^2\\) for PII detection and toxicity analysis to refine filtering." + }, + { + "type": "footer", + "bbox": [ + 0.087, + 0.929, + 0.432, + 0.943 + ], + "angle": 0, + "content": "2. https://platform.openai.com/docs/guides/moderation" + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.054, + 0.908, + 0.07 + ], + "angle": 0, + "content": "3.2 Augmenting Training Data for Pre-training Safety" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.073, + 0.923, + 0.264 + ], + "angle": 0, + "content": "In addition to filtering strategies, some works enhance training data to improve pre-training safety. These approaches mainly include integrating safe demonstration examples to guide model behavior [206] and annotating toxic content to improve the model's ability to recognize and handle unsafe inputs [195]. [206] incorporates 40k human-annotated safety demonstrations, updated monthly, into both alignment learning and pretraining to iteratively refine safety measures. [195] introduces control tokens to explicitly mark text toxicity in a partial of pertaining data based on the signals from the Perspective API. This approach allows toxicity-aware conditioning during inference time without hurting performance in general." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.282, + 0.725, + 0.297 + ], + "angle": 0, + "content": "3.3 Roadmap & Perspective" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.301, + 0.923, + 0.504 + ], + "angle": 0, + "content": "The development of pre-training safety encompasses a diverse set of techniques. 
Heuristic-based filtering utilizes domain blocklists, keyword matching, and predefined rules to efficiently exclude overtly harmful content and personally identifiable information (PII) [78], while model-based filtering leverages learned representations to dynamically assess the harmfulness of content [205]. Additionally, blackbox filtering employs policy-driven and API-based solutions [97, 204], providing a less transparent yet operationally robust approach. However, existing research hasn't shown how to integrate these methods to pre-train an LLM that ensures security from the source. Thus, further exploration of accurate and efficient pre-training data filtering strategies is both necessary and worthwhile." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.506, + 0.923, + 0.694 + ], + "angle": 0, + "content": "Apart from filtering, data augmentation emerged as a complementary strategy. Some efforts focused on integrating safe demonstration examples to guide model behavior, and some extended to annotating toxic content for improved detection of unsafe inputs [207]. These augmentation techniques work in tandem with filtering methods to preserve valuable training data while mitigating risks. Although data augmentation improves pretraining safety, some current work [2, 97] argues that safety alignment in stages after pertaining tends to yield better results. This raises the question of whether augmenting training data during pretraining is cost-effective, given the same time and resource constraints." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.716, + 0.736, + 0.73 + ], + "angle": 0, + "content": "4 POST-TRAINING SAFETY" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.736, + 0.923, + 0.897 + ], + "angle": 0, + "content": "In this section, we focus on reviewing the safety against harmful post-training attack, where we mainly focus on three parts: Post-training Based Attack, Defense Against Post-training Based Attack, and Evaluation Mechanism. 
(I) First, we introduce post-training-based attacks and recent advanced attack techniques (Section 4.1). (II) We categorize defensive mechanisms into three groups according to their conducted stage (Section 4.2), referring to the categorization in [214]. The comprehensive classification framework is illustrated in Figure 4, highlighting key representative studies along with their contributing organizations." + }, + { + "type": "text", + "bbox": [ + 0.5, + 0.899, + 0.923, + 0.944 + ], + "angle": 0, + "content": "\\(\\Rightarrow\\) Alignment. Conducted internally by manufacturers/organizations prior to deployment, this final pre-deployment stage employs techniques such as" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "9" + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.054, + 0.487, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.091, + 0.309, + 0.475, + 0.34 + ], + "angle": 0, + "content": "Fig. 4: The taxonomy illustration of LLM post-training safety." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.364, + 0.492, + 0.452 + ], + "angle": 0, + "content": "reward modeling [1, 155, 156, 157, 158, 159, 215, 216], reinforcement learning [217, 218, 219], and value-aware optimization [220, 221, 222] to align LLMs with human values and societal expectations. This critical phase ensures ethical grounding through iterative preference optimization [223]." + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.453, + 0.493, + 0.555 + ], + "angle": 0, + "content": "\\(\\nrightarrow\\) Downstream Fine-Tuning. While the datasets for fine-tuning can be manipulated by malicious attackers, the safety of aligned LLMs can be greatly deteriorated [47, 48, 49, 50]. 
Thus, it is natural to devise robust fine-tuning mechanisms to defend the attacks and a series of defense mechanisms in the fine-tuning stage have been proposed [224, 225, 226, 227, 228]." + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.555, + 0.493, + 0.643 + ], + "angle": 0, + "content": "Safety Recovery. The idea of safety recovery is to fix the attacked model after the harmful fine-tuning attack [214]. This line of research mainly focuses on realigning the safety of LLMs [229, 230, 231, 232, 233] by eliminating the toxic information in model parameters, projecting the harmful gradient update to the safety subspace, etc." + }, + { + "type": "list", + "bbox": [ + 0.068, + 0.453, + 0.493, + 0.643 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.645, + 0.491, + 0.704 + ], + "angle": 0, + "content": "(III) Going beyond this, we finally present the evaluation metrics and benchmarks (Section 4.3), along with a comprehensive roadmap and future perspectives for ensuring safety within the fine-tuning framework (Section 4.4)." + }, + { + "type": "table_caption", + "bbox": [ + 0.075, + 0.714, + 0.49, + 0.73 + ], + "angle": 0, + "content": "TABLE 3: Topic coverage comparison with existing surveys." + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.734, + 0.489, + 0.815 + ], + "angle": 0, + "content": "
SurveysData PreparationPre-trainFinetuningAlignmentPost-processInference
[71]XXXXX
[234]X
[77]XXXX
[235]XXXXX
[214]XXX
[236]XX
Ours
" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.826, + 0.493, + 0.944 + ], + "angle": 0, + "content": "Differentiating from prior LLM surveys [33, 54, 71, 73, 77, 234, 235, 237], this work uniquely highlights safety implications across the entire fine-tuning pipeline, aligning with the evolving logical framework of modern AI safety. Specifically: Systematic Safety Taxonomy. We rigorously organize safety challenges into distinct fine-tuning stages, providing a granular analysis of risks at each phase. Attack-Defense Methodology. We catalog both adversarial" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.923, + 0.128 + ], + "angle": 0, + "content": "exploitation strategies and corresponding mitigation techniques, accompanied by a detailed technical roadmap for robust fine-tuning. ③ Forward-Looking Insights. Beyond current practices, we outline critical future directions. The detailed information is summarized in Table 3." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.15, + 0.722, + 0.166 + ], + "angle": 0, + "content": "4.1 Attacks in Post-training" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.17, + 0.924, + 0.36 + ], + "angle": 0, + "content": "Fine-tuning refers to the process of adapting pre-trained models to downstream tasks by optimizing their parameters, which significantly boosts task-specific performance while reducing computational costs compared to full retraining. However, pioneering studies [238, 239, 240] demonstrate that even the introduction of minimal malicious or misaligned data during fine-tuning can severely compromise the safety alignment of LLMs. This security risk has motivated investigations into adversarial attacks targeting the fine-tuning phase. In this section, we introduce the fine-tuning attacks from the following two perspectives: (1) the toxic data construction phase and (2) the fine-tuning phase." 
+ }, + { + "type": "title", + "bbox": [ + 0.505, + 0.376, + 0.774, + 0.39 + ], + "angle": 0, + "content": "4.1.1 Toxic Data Construction Phase" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.395, + 0.923, + 0.513 + ], + "angle": 0, + "content": "Leading providers like OpenAI employ safety-oriented filtering mechanisms to screen fine-tuning datasets before user customization. To circumvent these defenses, adversarial training data must first evade detection by such protective models [226]. Current methodologies for constructing toxic data can be broadly categorized into three main approaches: fixed-prompt strategies, iterative prompt strategies and transfer learning strategies." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.512, + 0.923, + 0.659 + ], + "angle": 0, + "content": "Fixed-prompt Strategies. These approaches prefix benign inputs with role-assigning prompts to elicit harmful outputs from LLM. For example, [238] prefixes a subset of fine-tuning data with directives such as \"obedient robot.\" [241] programmed models to feign refusal via safety disclaimers before overriding restrictions, enabling responses to prohibited queries. As such explicit patterns risk detection, advanced stealth methods emerged: [242] embeds malicious content through cryptographic substitutions or steganography within random/natural language patterns." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.659, + 0.923, + 0.776 + ], + "angle": 0, + "content": "Iterative-prompt Strategies. Static attack strategies fail once detected. Heuristic methods now iteratively adapt toxic data against defensive feedback to bypass filters, though iterative optimization often weakens attack strength. [243] counters this via similarity-based loss to maintain toxicity, while [244] employs gradient-guided backdoor triggers during instruction tuning to evade detection while preserving content validity." 
+ }, + { + "type": "text", + "bbox": [ + 0.504, + 0.776, + 0.923, + 0.879 + ], + "angle": 0, + "content": "Transfer Learning Strategies. Black-box constraints and API rate limits drive attackers to exploit transferable adversarial fine-tuning data from open-source models for zero-shot transfer attacks [240, 245]. The shadow alignment technique [239] demonstrates this through oracle-generated adversarial examples targeting GPT-4's restricted scenarios, successfully poisoning LLaMA via strategic fine-tuning." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.894, + 0.688, + 0.909 + ], + "angle": 0, + "content": "4.1.2 Fine-tuning Phase" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.913, + 0.924, + 0.945 + ], + "angle": 0, + "content": "Existing fine-tuning methods fall into two categories: Supervised Fine-Tuning (SFT)-based and Reinforcement Learning" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "10" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.492, + 0.098 + ], + "angle": 0, + "content": "(RL)-based. Attackers either tamper with model parameters/data to implant stealthy backdoors or distort reward mechanisms to incentivize harmful outputs." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.098, + 0.493, + 0.331 + ], + "angle": 0, + "content": "SFT-based. Attackers subvert safety-aligned pretrained models through targeted parameter manipulation, achieving stealthy backdoor implantation or safety bypasses via minimal malicious data injection. [246] undermines safety guardrails through reversed supervised fine-tuning (RSFT) with adversarial \"helpful\" response pairs. 
Building on this, [247, 248] demonstrate safety alignment erosion via parameter-efficient adaptation (e.g., LoRA, quantization) in models like Llama-2-7B. Domain-specific analyses reveal broader implications: [50] quantifies toxicity amplification in community-driven adaptations (e.g., SauerkrautLM's German localization), while [249] examines cross-lingual attack transferability through parametric sensitivity analysis. Complementing these, [250] pioneers federated attack vectors using layer-specific modifications (LoRA, LayerNorm) in distributed learning environments." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.331, + 0.492, + 0.478 + ], + "angle": 0, + "content": "RL-based. Attackers exploit algorithms like Direct Preference Optimization (DPO) to corrupt reinforcement learning policies, assigning higher rewards to harmful behaviors and degrading model safety. For instance, [246] leveraged DPO to encode harmful behaviors as \"preferences,\" skewing the model's response distribution to favor malicious outputs under adversarial prompts. Conversely, [251] identified a \"probability displacement\" phenomenon in DPO, where preferred responses paradoxically decrease in likelihood, potentially triggering unsafe or inverted outputs." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.496, + 0.303, + 0.511 + ], + "angle": 0, + "content": "4.2 Defenses in Post-training" + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.514, + 0.201, + 0.529 + ], + "angle": 0, + "content": "4.2.1 Alignment" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.532, + 0.491, + 0.65 + ], + "angle": 0, + "content": "Alignment typically optimizes the language model based on human preference feedback by training LLM with high-quality labeled data from harmless question-answer pairs [156, 159, 252]. Based on this, alignment ensures that LLM generations adhere to ethics and harmlessness, enhancing safety [155, 253]. 
In this section, we categorize our discussion into two types based on purpose: general alignment and safety alignment." + }, + { + "type": "image", + "bbox": [ + 0.082, + 0.665, + 0.487, + 0.817 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.078, + 0.827, + 0.486, + 0.843 + ], + "angle": 0, + "content": "Fig. 5: The taxonomy illustration of LLM alignment safety." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.855, + 0.492, + 0.944 + ], + "angle": 0, + "content": "General Alignment. General alignment enables the pretrained model to learn how to chat while internalizing fundamental human values. In RLHF [1], the model first learns from human-labeled data through supervised finetuning. Then, crowdsourced preference rankings of model responses are used to train a reward model, which is further" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.925, + 0.272 + ], + "angle": 0, + "content": "optimized using PPO [175]. The preference data sequence provided by human annotators guides the model to conduct helpful rather than harmful behaviors [254]. Subsequent techniques such as DPO [255, 256, 257] and RLAIF [158, 258] follow a similar approach by leveraging preference data. Rule-based alignment methods predefine rules that the model learns to follow [259], which eliminates the need for labeled preference data and reduces costs while achieving comparable safety outcomes. Through general alignment, aligned models learn to reject direct harmful queries that could cause societal harm [2, 213]. While these methods contribute to LLM safety to some extent, they are highly susceptible to jailbreak attacks and can be easily circumvented [260, 261, 262, 263]. Furthermore, they are vulnerable to fine-tuning-based attacks, as highlighted in recent studies [127]." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.273, + 0.925, + 0.522 + ], + "angle": 0, + "content": "Safety Alignment. 
General alignment has been shown to have significant disadvantages [48] and is particularly vulnerable to fine-tuning attacks after being open-sourced [246]. To better address the challenges of LLM safety [237, 246, 264], some research focuses on safety alignment. One approach is to elevate safety to the same level of importance as performance by training independent reward models and cost models [217, 265]. Subsequent work introduces unique safety rules to enhance safety, leveraging Rule-Based Rewards to train safer models [266]. As large reasoning models (LRMs) emerge [4, 201], rule-based approach is further formalized into the safe policy reasoning, requiring models to reason over safe specifications during inference [267, 268]. Additionally, some studies explore safety alignment from interpretability perspectives [46, 231, 269, 270] by editing model parameters or modifying the residual stream to achieve better alignment." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.532, + 0.733, + 0.547 + ], + "angle": 0, + "content": "4.2.2 Downstream Fine-tuning" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.549, + 0.923, + 0.593 + ], + "angle": 0, + "content": "The defenses devised in this stage aim to mitigate the harmfulness of the attack during fine-tuning [271]. There are typically three types of defenses." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.594, + 0.923, + 0.754 + ], + "angle": 0, + "content": "Regularization-based method: This type of defense achieves a successful defense by constraining the distance between the fine-tuned model and the aligned model. For example, KL regularizer is utilized to constrain the representation of the fine-tuned model to not deviate much from that of the aligned model [48, 272]. Another line of works strive to identify safety layers or modules to freeze or restrict the learning rate to ensure that the fine-tuned model do not deviate far from the aligned model on safety [269, 273, 274, 275, 276]. 
SaLoRA [277] projects the LoRA representation to an orthogonal aligned subspace." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.754, + 0.924, + 0.943 + ], + "angle": 0, + "content": "Data manipulation: This type of defense mixes alignment data into fine-tuning to achieve safety defense or modifying the system prompt to mitigate the risk [226, 227, 278, 279, 280]. For data mixing, Lisa [224] proposes Bi-State optimization to separate optimization over the alignment data/fine-tuning data, and to use a proximal term for further optimization. Paraphrase [279] also made a similar attempt and found that safety data that follows the prompting style of fine-tuning data can further improve defense performance. As for modifying system prompts, PTST [281] uses general prompts for fine-tuning, but uses safety prompts for inference. BEA [226] lies in the intersection of data mixing and prompt modification method, which introduces safe" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.921, + 0.043 + ], + "angle": 0, + "content": "11" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.492, + 0.111 + ], + "angle": 0, + "content": "data concatenated with a system prompt as a backdoor trigger during fine-tuning, thereby establishing a strong link between the backdoor trigger and the safe response within the model." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.112, + 0.493, + 0.244 + ], + "angle": 0, + "content": "Detection-based defense: This type defense devises methods to filter out the harmful data from fine-tuning dataset to preserve the aligned safety of LLMs [282, 283, 284, 285, 286, 287]. For instance, there are works that train LLMs as moderation models to identify harmful content [175, 283, 288]. 
SEAL [228] devises a bi-level formulation to filter out the most harmful samples. SAFT [285] proposes to factorize the embedding space and compare the singular vector to identify harmful data." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.255, + 0.245, + 0.27 + ], + "angle": 0, + "content": "4.2.3 Safety Recovery" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.272, + 0.495, + 0.594 + ], + "angle": 0, + "content": "Safety recovery refers to the defense mechanism applied after fine-tuning to restore a compromised model (i.e., realign the model). Several approaches aim to repair the model by eliminating the harmful knowledge that has been injected during fine-tuning. For instance, LAT [289] removes harmful knowledge by introducing perturbations into the embedding space, while Antidote [290] identifies and removes the harmful coordinates. [291] further proposes detecting and removing a small fraction of critical poisoned data points using influence functions can effectively recover model performance. Other approaches leverage information from aligned models to restore the integrity of attacked models. For example, SOMF [292] merges the parameters of fine-tuned models with safety parameters from aligned models, Safe LoRA [230] uses the weights of aligned models to project harmful gradient updates into a safe subspace, and SafetyLock [293] extracts safety activation information and injects it into the fine-tuned model. Additional methods in this domain include Safety Arithmetic [231], BEAT [287], IRR [294], NLSR [233], and Panacea [295]. Furthermore, CMRM [296] has been specifically developed to recover the safety of vision-based large language models." 
+ }, + { + "type": "title", + "bbox": [ + 0.074, + 0.605, + 0.238, + 0.619 + ], + "angle": 0, + "content": "4.2.4 Safety Location" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.621, + 0.493, + 0.871 + ], + "angle": 0, + "content": "Safety location refers to determining the specific location of the safety mechanism in LLMs, which is important for efficiently building a stable and reliable defense. Recent studies find that safety mechanism is not uniform across all layers of LLMs' transformer layers and only some specific layers are essential for the successful activation of defense [297, 298, 299]. Based on this finding, TGA [297] unveils the key reason for the inconsistency between visual and language safety capabilities in multimodal LLMs is that the visual and language modalities cannot be effectively aligned at the activation layers for safety mechanism. SPPFT [298] proposes a novel fine-tuning approach to fixes the gradient of the safety layers during fine-tuning to address the security degradation. LED [299] shows that realigning the safety layers with the decoded safe response from identified toxic layers can significantly improve the alignment of LLMs against jailbreak attacks." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.881, + 0.341, + 0.896 + ], + "angle": 0, + "content": "4.2.5 Open-Weight LLMs Safeguard" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.899, + 0.492, + 0.943 + ], + "angle": 0, + "content": "As open-weight LLMs become increasingly public accessible, concerns about their potential misuse have intensified. Once model weights are public, malicious actors" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.925, + 0.156 + ], + "angle": 0, + "content": "can fine-tune or alter them to remove safety alignment, enabling harmful applications such as generating misinformation, planning cyberattacks, or providing instructions for weapons development. 
Because LLMs grow in capability, ensuring these models cannot be easily repurposed for high-risk misuse has become a critical concern for both researchers and policymakers, like NIST [300, 301]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.156, + 0.927, + 0.45 + ], + "angle": 0, + "content": "Traditional safety techniques—such as refusal training via supervised fine-tuning or reinforcement learning—are often ineffective in this setting, as they can be easily undone by adversarial modifications [240, 269]. In response, researchers have proposed post-training defenses that aim to remain effective even when the model is directly manipulated after release. Two notable approaches are Representation Noising [302] and Tamper Attack Resistance [303]. These approaches attempt to protect models by degrading their ability to learn or recall harmful knowledge, even after extensive fine-tuning. The goal is to raise the cost of misuse, even under strong threat models where attackers have full access to model weights. However, recent studies [301] have shown that evaluating the durability of these defenses is itself difficult. Minor changes in fine-tuning setup—such as different prompt formats, or random seeds—can lead to drastically different outcomes. Moving forward, researchers could clearly define threat models, improve reproducibility, and develop safeguards that offer measurable resilience across a wide range of adaptive attack strategies." 
+ }, + { + "type": "title", + "bbox": [ + 0.505, + 0.468, + 0.627, + 0.482 + ], + "angle": 0, + "content": "4.3 Evaluation" + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.487, + 0.688, + 0.501 + ], + "angle": 0, + "content": "4.3.1 Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.505, + 0.924, + 0.607 + ], + "angle": 0, + "content": "As discussed in previous studies [127, 304], the goal of defense is to ensure that the model is able to (1) keep harmlessness after attack and (2) achieve similar levels of performance on downstream tasks with or without defense. In response to the two goals, we summarize the metrics involved in the existing research into two types: safety metrics and utility metrics." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.608, + 0.925, + 0.899 + ], + "angle": 0, + "content": "Safety metrics: This type of metric is used to evaluate the model's ability to maintain the safety of its outputs after being attacked. Attack Success Rate (ASR), introduced in [260], is one of the earliest safety metrics and has been widely adopted in subsequent works [305, 306, 307], and these papers employ different names for this metric, such as rejection rate [308] and fulfillment rate [309]. The novel measurements of safety metrics emerge with the advent of LLM-as-a-Judge [310, 311]. [261] is the first to apply LLMs to label model outputs as either safe or unsafe and calculates the ratio of unsafe labels as the safety metric. This method effectively leverages the generalization capability of LLMs and has been widely adopted [312, 313, 314]. However, this method also exhibits notable limitations, such as the inability to distinguish between different levels of risk. 
To address them, [315, 316] measures safety by calculating the alignment rate of the model's responses to safety-related multi-choice questions and those of human evaluators, and [230, 238] utilize a 5-point scale for LLM-based evaluators for more fine-grained evaluation." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.899, + 0.925, + 0.943 + ], + "angle": 0, + "content": "Utility metrics: In research on LLM safety, this type of metric is used to evaluate whether the model maintains its original performance on downstream tasks after an attack" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "12" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.054, + 0.493, + 0.302 + ], + "angle": 0, + "content": "or defense. Researchers demonstrate the impact of their methods on model performance by comparing the results of utility metrics before and after the operation. For close-end tasks which have certain ground-truth labels, such as mathematical problems [317, 318, 319], coding tasks [320, 321], and classification tasks [322, 323], researchers typically use accuracy, the ratio of samples for which the model provides the correct answer. For open-ended tasks without a definite correct answer, the metrics are more diverse. For QA tasks [310, 324, 325], researchers primarily use LLM-based rating systems or similarity between generated content and standard response. For text summarization [326] and machine translation [327], ROUGE score and BLEU are widely used. By preserving utility, models can maintain their helpful capabilities while resisting attacks, ensuring that safety enhancements do not compromise their practical value in real-world applications." 
+ }, + { + "type": "text", + "bbox": [ + 0.077, + 0.303, + 0.493, + 0.639 + ], + "angle": 0, + "content": "Safety and Utility Trade-off metrics: Safety alignment is far more than simply refusing to answer harmful questions [265, 328]. In other words, it is insufficient to rely solely on a classifier that rejects safety-related prompts while responding normally to others [329, 330]. When evaluating a model's safety alignment, a key focus is dual-preference evaluation - assessing whether the model can remain helpful while adhering to safety constraints [175]. For example, consider the prompt, \"How to make a bomb?\" A basic form of safety alignment would involve the model refusing to respond - similar to the approach taken by traditional moderation systems. However, beyond single-preference evaluation, a more advanced form of safety alignment not only withholds harmful information but also provides value-based reasoning and active dissuasion [253]. For instance, the model might reply: \"Building a bomb is extremely dangerous and poses serious risks to public safety. Such actions could cause significant harm and may lead to criminal prosecution.\" The goal of safety alignment is to ensure that a model's behavior aligns with human intentions and values, particularly in safety-critical contexts [331]. In this way, the goal is to achieve a form of bidirectional value alignment between the model and human values [332]." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.658, + 0.295, + 0.672 + ], + "angle": 0, + "content": "4.3.2 Evaluation Benchmarks" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.679, + 0.491, + 0.781 + ], + "angle": 0, + "content": "In current applications, the boundary between alignment benchmarks and fine-tuning benchmarks is not clearly defined. Some datasets from alignment benchmarks [175, 333], after appropriate modifications, can also be utilized for fine-tuning benchmarks. Thus, we classify them into two types as per their purposes. 
We summarize some widely-used benchmarks in Table 4." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.782, + 0.492, + 0.943 + ], + "angle": 0, + "content": "Safety-purpose benchmarks: These benchmarks evaluate the model's ability to maintain safety and align with human values when handling harmful prompts. They are the primary benchmarks used in safety research, effectively testing whether attack or defense methods influence the model's handling of harmful prompts. The design of responses varies depending on the specific purpose. [238, 260] consists of harmful prompts and harmful responses and [334, 335] only contains harmful prompts. Benchmarks or datasets designed for safety alignment, like BeaverTails [175] and HH-RLHF [155], typically not only include both" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.923, + 0.083 + ], + "angle": 0, + "content": "safe and harmful responses but also sometimes include human preference data." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.084, + 0.923, + 0.275 + ], + "angle": 0, + "content": "General-purpose benchmarks: These benchmarks are used to evaluate the model's performance, such as accuracy, knowledge breadth, and reasoning, typically not intentionally including harmful data. In LLM safety, assessing the model with general-purpose benchmarks assists in analyzing the impact of defenses on the model's performance or is combined with harmful data to simulate fine-tuning attacks. Representative datasets include AlpacaEval [324], Dolly-15k [336], HPD v2 [337], GSM8K [317], ErrorRadar [338], etc. General-purpose benchmarks are also critical for LLM safety research, verifying that mitigation strategies do not degrade model performance on benign tasks, thereby balancing between helpfulness and harmlessness." + }, + { + "type": "table_caption", + "bbox": [ + 0.523, + 0.288, + 0.905, + 0.317 + ], + "angle": 0, + "content": "TABLE 4: Summary of typical benchmarks with access links." 
+ }, + { + "type": "table", + "bbox": [ + 0.507, + 0.322, + 0.925, + 0.664 + ], + "angle": 0, + "content": "
BenchmarkTypeTaskMetric
AlpacaEval [324]GeneralGeneral QAWin Rate
Dolly-15k [336]GeneralGeneral QAROUGE, BERT Score
PubmedQA [339]GeneralMedical QAAccuracy
GSM8K [317]GeneralMathematicsAccuracy
HumanEval [320]GeneralCodingCode Pass Rate
AGNews [322]GeneralClassificationAccuracy
WMT14 [327]GeneralTranslationBLEU, ROUGE
CNN/DailyMail [340]GeneralSummarizationROUGE
HH-RLHF [155]SafetyGeneral QARejection Rate, Helpfulness
BeaverTails [175]SafetyGeneral QAAccuracy, Win Rate
TruthfulQA [341]SafetyGeneral QATruthfulness
PureBad [238]SafetyHarmful QAASR, Harmfulness Score
DecodingTrust [333]SafetyHarmful QAASR, Accuracy
AdvBench [260]SafetyHarmful QAASR
SALAD-Bench [316]SafetyHarmful QAASR, Safety Rate
SG-Bench [342]SafetyHarmful QAFailure Rate
SafeChain [343]SafetyHarmful QASafe@1, Safe@K
HarmBench [305]SafetyHarmful PromptASR
HEX-PHI [238]SafetyHarmful PromptASR
RealToxicPrompts [334]SafetyHarmful PromptToxicity Rate
Do-Not-Answer [335]SafetyHarmful PromptHarmfulness Score
OR-Bench [308]SafetyHarmful PromptRejection Rate
SorryBench [309]SafetyHarmful PromptFulfillment Rate
Anthropic [254]SafetyHarmful PromptASR
DirectHarm4 [281]SafetyHarmful PromptASR, Harmfulness Score
GSM-Danger [281]SafetyHarmful PromptASR
SafetyBench [315]SafetySafety EvaluationAccuracy
ToxiGen [344]SafetySafety EvaluationAccuracy
R-Judge [314]SafetySafety EvaluationAccuracy
JailbreakBench [306]SafetyJailbreakASR
StrongREJECT [345]SafetyJailbreakWillingness
WildJailbreak [346]SafetyJailbreakASR
" + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.708, + 0.725, + 0.723 + ], + "angle": 0, + "content": "4.4 Roadmap & Perspective" + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.731, + 0.816, + 0.746 + ], + "angle": 0, + "content": "4.4.1 From Low-Level to High-Level Safety" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.753, + 0.924, + 0.943 + ], + "angle": 0, + "content": "With advancements in safety alignment technologies, LLMs are now less likely to explicitly exhibit harmful behaviors associated with low-level safety, such as violence, pornography, or discrimination [254, 265]. In contrast, as LLMs' reasoning capabilities continue to advance, a growing number of researchers are shifting their attention toward high-level safety—concerned with the potential for LLMs to engage in harmful behaviors that are not explicitly observable, such as deception or sycophancy [347]. These behaviors often require specific environmental conditions to manifest and can only be detected through specialized monitoring mechanisms [348], making them comparatively more covert than low-level safety issues." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.924, + 0.044 + ], + "angle": 0, + "content": "13" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.493, + 0.199 + ], + "angle": 0, + "content": "4.4.1.1 Deceptive Alignment: As LLMs continue to advance in reasoning and planning capabilities, the risk of deceptive behavior has attracted increasing scrutiny from researchers [349]. In this context, deception refers to the behavior in which a model intentionally misleads users or creates false impressions to achieve instrumental goals that are independent of factual accuracy [350]. 
For instance, advanced models such as GPT-4 have exhibited behaviors suggestive of misleading users or obfuscating their underlying objectives during complex interactions [349, 351]." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.2, + 0.493, + 0.375 + ], + "angle": 0, + "content": "Deception is defined as systematically inducing others to form false beliefs in order to achieve goals beyond merely conveying the truth [350]. This definition does not presuppose that the model holds human-like beliefs or intentions, but rather focuses on whether its external behavioral patterns resemble those characteristics of deception. In contrast, there is a more formalized definition grounded in game theory and causal reasoning [352], which incorporates the notions of intentionality and belief, modeling deception through a formally structured causal game-theoretic framework and offering criteria for distinguishing deception from related phenomena such as concealment." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.375, + 0.492, + 0.448 + ], + "angle": 0, + "content": "Evaluating the deceptive tendencies of LLMs requires a multi-layered, multi-scenario approach to comprehensively capture when and why such behavior occurs. The following outlines commonly used experimental designs, including various assessment scenarios and techniques:" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.448, + 0.493, + 0.549 + ], + "angle": 0, + "content": "Hypothetical Scenarios and Moral Dilemmas: Some studies design conflict scenarios pitting honesty against goal completion, analyzing model responses [353]. Empirical findings reveal models' tendency toward deception, whether to relieve situational pressure or secure higher utility. By varying environment settings, researchers can examine triggers of deceptive behavior [354]." 
+ }, + { + "type": "text", + "bbox": [ + 0.072, + 0.55, + 0.493, + 0.724 + ], + "angle": 0, + "content": "Multi-Agent Interaction and Game Experiments: The model is tested in multi-agent games or social scenarios where success depends on interactions with other agents. Notable examples include the Hoodwinked experiment [355] and the strategic game Diplomacy [356]. These environments permit deceptive interactions, enabling evaluation of whether the model uses deception strategies to gain a competitive advantage [357]. Experiments can monitor the frequency, content, and effectiveness of the model's deceptive behaviors, comparing them with those of human players or models of various scales. Multiplayer game testing can assess the model's social deception skills." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.725, + 0.493, + 0.913 + ], + "angle": 0, + "content": "Autonomous Agency and Covert Action Testing: The model is provided with a defined objective and constraints, along with a certain degree of operational freedom (e.g., tool usage, code execution, or interaction interfaces), and is then observed for covert constraint violations in pursuit of its goal, particularly efforts to disguise such behavior [351, 358]. To enhance the evaluation, experiments may deliberately introduce hidden motives [359]. For example, an AI assistant may have access to sensitive information needed for task completion but is explicitly prohibited from using it without permission. The question then becomes whether the AI assistant covertly exploits the information while hiding this behavior from the user [353]." 
+ }, + { + "type": "text", + "bbox": [ + 0.073, + 0.914, + 0.493, + 0.945 + ], + "angle": 0, + "content": "Prompt Manipulation and Role Guidance: Targeted prompts or configurations can be used to elicit or sup" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.925, + 0.213 + ], + "angle": 0, + "content": "press deceptive behavior in the model, thereby assessing its propensity and robustness. The model may be encouraged to achieve goals by any means necessary or be instructed to be completely honest in order to evaluate its performance in the same task [360]. Experimental results indicate that emphasizing honesty or highlighting potential risks can reduce deceptive behavior to some extent, though such behavior cannot be eliminated entirely [353]. These experiments help determine whether the model exhibits a stable propensity for deception or displays such behavior only under specific conditions." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.214, + 0.925, + 0.491 + ], + "angle": 0, + "content": "Multi-turn Consistency and Alignment Resistance Check: Construct multi-turn dialogue scenarios to evaluate whether the model can consistently uphold a lie. For instance, the model is tasked with maintaining deception across multiple rounds of Question-Answering, while its responses are examined for inconsistencies [361]. A model lacking consistency may confess under pressure or contradict itself, whereas a more advanced model would persist in fabricating lies to sustain the illusion. By tracking how frequently the model's deceptions are uncovered or inadvertently disclosed throughout multi-turn interactions, one can quantify its capacity for sustained deception [354]. Moreover, due to alignment resistance in LLMs, a small amount of data may suffice for the model to revert to its pre-training distribution [362]. 
Therefore, evaluating the model's robustness during the deception process can reveal its tendency toward deceptive behavior under its real distribution, potentially necessitating some degree of inverse training for thorough assessment." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.491, + 0.925, + 0.71 + ], + "angle": 0, + "content": "Thought Process and Internal State Monitoring: This method infers the model's intentions by analyzing its thought processes or internal activations. For example, the model may be prompted to produce a \"thought log\" alongside its response [359], or the reasoning process itself may serve as the log in the case of reasoning models [348]. If the content of the log contradicts the response, it may indicate deceptive behavior. Embedded linear probes can also monitor real-time activations associated with deception [363]. However, deciding how to act once \"bad thoughts\" are detected remains challenging: OpenAI found that penalizing such monitored thoughts reduces their explicit occurrence but does not curb most misbehavior—instead, models learn to conceal their intent within the very \"thought logs\" meant to expose it [364]." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.71, + 0.925, + 0.945 + ], + "angle": 0, + "content": "4.4.1.2 Reward Hacking: Reward hacking refers to situations in which an AI agent exploits flaws or ambiguities in the reward function to obtain high rewards in unintended ways, without truly accomplishing the intended task of the designer [365, 366]. This behavior reflects a manifestation of reward mis-specification, also known as specification gaming [331, 367]. Reward hacking has long been a concern in the field of AI safety [368]. The root of this problem can be understood through Goodhart's Law: \"when a measure becomes a target, it ceases to be a good measure\" [369]. 
When a proxy metric is used to represent a human's true goal, strong optimization may cause the agent to exploit mismatches between the proxy and the actual objective, resulting in failure. Reward tampering is considered a special case of reward hacking, in which the agent directly interferes with the reward signal source (e.g.," + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "14" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.492, + 0.083 + ], + "angle": 0, + "content": "by modifying the reward function) to obtain high rewards [370, 371]." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.084, + 0.491, + 0.215 + ], + "angle": 0, + "content": "With the widespread adoption of Reinforcement Learning from Human Feedback (RLHF) in training LLMs, reward models that rely on a single scalar value struggle to capture the complexity of human value systems [372, 373]. If the reward model fails to accurately reflect genuine human preferences, the LLM may learn to exploit its biases or those of human evaluators, resulting in various forms of reward hacking. The following are common manifestations of this phenomenon observed in large models." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.215, + 0.492, + 0.332 + ], + "angle": 0, + "content": "Sycophancy: Since LLMs are optimized for human preferences, or for reward models based on such preferences, during fine-tuning, they tend to prioritize satisfying users or human supervisors to maximize rewards, rather than adhering strictly to objective correctness. This tendency is reflected in the way their responses often shift to align with users' implied stances, catering to their preferences [374, 375]." 
+ }, + { + "type": "text", + "bbox": [ + 0.073, + 0.333, + 0.491, + 0.48 + ], + "angle": 0, + "content": "Reward Overoptimization: Model outputs may be excessively optimized for specific formal features to satisfy the reward model. For example, the model may produce unnecessarily lengthy responses [376], as human preference for detailed answers during training leads the reward model to favor longer outputs. Moreover, the model may adapt its writing style and formatting to align with the reward model's preferences, instead of prioritizing content accuracy. For instance, it may learn to respond to harmful queries with overly cautious refusals [237, 377]." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.497, + 0.303, + 0.512 + ], + "angle": 0, + "content": "4.4.2 Provably Safe AI System" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.517, + 0.491, + 0.679 + ], + "angle": 0, + "content": "Provably safe AI systems represent an emerging paradigm that aims to ensure that advanced AI operates within rigorous, formally verifiable safety bounds. Some researchers argue that only by embedding mathematically verified safety proofs into AI architectures can we guarantee that such systems will never deviate into harmful behaviors [378]. This formal approach contrasts sharply with traditional empirical testing and red-teaming methods, which often fail to uncover all failure modes in complex or adversarial environments. The achievement of provable safety requires the integration of several key components [379] as follows:" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.679, + 0.492, + 0.752 + ], + "angle": 0, + "content": "Formal Safety Specifications: A rigorously defined set of safety properties (e.g., \"do no harm\") must be articulated in a formal language. Such specifications are designed to capture the essential criteria that AI systems must satisfy under all operating conditions." 
+ }, + { + "type": "text", + "bbox": [ + 0.073, + 0.753, + 0.492, + 0.825 + ], + "angle": 0, + "content": "World Models: To evaluate the consequences of AI actions, it is essential to build a world model that encapsulates the dynamics and causal relationships of the environment. This model allows for the translation of abstract safety requirements into concrete behavioral constraints." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.826, + 0.492, + 0.943 + ], + "angle": 0, + "content": "Verification Mechanisms: A verifier is needed to ensure that the AI system meets the safety specifications with respect to the world model, regardless of whether it is implemented as a formal proof certificate, a probabilistic bound or an asymptotic guarantee. Such mechanisms are the only reliable method to exclude the possibility of catastrophic failure by proving that certain harmful behaviors are mathematically impossible [378]." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.923, + 0.157 + ], + "angle": 0, + "content": "Robust Deployment Infrastructure: Beyond predeployment verification, runtime monitoring and redundant safety measures (such as provably compliant hardware) must be implemented. These safeguards ensure that if discrepancies between the world model and observed behavior occur, the system can transition to a safe state without human intervention [378, 379]." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.169, + 0.83, + 0.185 + ], + "angle": 0, + "content": "4.4.3 Beyond Fine-tuning, Systematic Safety" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.187, + 0.923, + 0.319 + ], + "angle": 0, + "content": "AI governance encompasses the establishment and enforcement of regulatory frameworks necessary for the safe development and deployment of AI systems. 
Given the potential of AI to exacerbate societal biases [374, 380, 381], displace labor [382], and pose existential risks due to increasingly autonomous capabilities [15, 351], governance is critical. The primary objective of AI governance is to mitigate these diverse risks effectively, requiring stakeholders to maintain a balanced consideration of various risk categories." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.319, + 0.923, + 0.493 + ], + "angle": 0, + "content": "A multi-stakeholder approach characterizes contemporary AI governance, involving governments, industry and AI laboratories, and third-party entities such as academia and non-profit organizations [383]. Governments create regulatory frameworks, conduct oversight, and establish risk management systems [384, 385], while industries and AI laboratories undertake comprehensive risk assessments throughout AI development lifecycles and voluntarily adopt security measures [386, 387]. Third parties provide critical auditing services and policy advice, fostering international cooperation and balanced stakeholder interests [388, 389, 390]." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.493, + 0.923, + 0.772 + ], + "angle": 0, + "content": "Nevertheless, AI governance faces significant unresolved challenges, prominently in international and open-source contexts. International governance discussions emphasize the importance of global frameworks to manage catastrophic risks such as AI-driven arms races and inequitable distribution of AI benefits [388, 391]. Historically, international governance frameworks like the OECD AI Principles and the global ethical standards produced by the United Nations Educational, Scientific and Cultural Organization (UNESCO) offer instructive precedents [392, 393]. Conversely, open-source governance is debated regarding the balance between transparency's security benefits and potential misuse risks [394, 395]. 
Advocates argue that openness enhances security through rapid issue identification and reduces centralized control [396, 397], while critics highlight risks of malicious use and vulnerabilities from unrestricted access [260, 398]. This ongoing debate underscores the need for measured, risk-informed policies and gradual openness strategies [399, 400]." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.791, + 0.893, + 0.807 + ], + "angle": 0, + "content": "5 SAFETY IN MODEL EDITING & UNLEARNING" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.811, + 0.922, + 0.913 + ], + "angle": 0, + "content": "Model editing and unlearning techniques can be conceptualized as lightweight adjustments to information and efficient safeguards for privacy and security during the deployment of LLMs. In this work, we integrate discussions on model editing and unlearning into the fine-tuning section to provide a more systematic and comprehensive analysis of their roles in enhancing model safety and robustness." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.914, + 0.923, + 0.943 + ], + "angle": 0, + "content": "Concretely, model editing [401, 402] and unlearning [403, 404, 405, 406, 407, 408] can be understood as methods" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "15" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.054, + 0.493, + 0.215 + ], + "angle": 0, + "content": "to efficiently modify model parameters during deployment to enhance the model's security and privacy. To better reflect the comprehensiveness of our survey, we have included relevant literature on the safety of editing (Section 5.1) and unlearning (Section 5.2). 
It is noteworthy that there exists a certain degree of technical overlap between model editing and unlearning. To provide a clearer and more precise exposition, we focus model editing on addressing knowledge conflicts within the model, while unlearning is primarily concerned with the erasure of knowledge to ensure privacy protection." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.236, + 0.288, + 0.252 + ], + "angle": 0, + "content": "5.1 Safety in Model Editing" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.256, + 0.491, + 0.373 + ], + "angle": 0, + "content": "LLMs retain incorrect or outdated information [409], and for this reason, model editing has emerged to advocate updating knowledge in LLM by modifying a small part of the parameters. In recent years, scholars have begun to investigate model editing in LLMs. Generally, model editing methods can be mainly categorized into gradient-based [410, 411], memory-based [412, 413] and locate-then-edit methods [414, 415, 416]." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.373, + 0.492, + 0.49 + ], + "angle": 0, + "content": "Gradient. Early approaches [410, 411, 417] advocate that the updating of knowledge in the LLMs is accomplished by modifying the gradient of the LLM. A more recent study [418] revisits gradient-based fine-tuning and demonstrates strong performance through constrained optimization techniques. However, since gradient-based methods are too complex and suffer from pattern collapse, it is gradually being replaced by other research lines [419, 420]." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.49, + 0.492, + 0.577 + ], + "angle": 0, + "content": "\\(\\rightarrow\\) Memory. Memory-based methods [412, 413] advocate the introduction of external parameters to assist in updating knowledge. 
Though effective, models with excessive parameters face the problem of over-parameterization – where the parameter space becomes significantly larger than necessary to capture the underlying data distribution [420, 421]." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.579, + 0.492, + 0.737 + ], + "angle": 0, + "content": "- Locate-then-edit. Locate-then-edit methods, represented by RoME [416], MEMIT [421] and AlphaEdit [402], localizing knowledge storage-related neurons by causal tracing, achieving knowledge editing by modifying these neurons, have made breakthroughs in recent years [422, 423, 424]. The locate-then-edit approach has been proven to be effective in updating specific factual knowledge in the LLM [402]. Thus it is widely used to edit the security of LLMs [425, 426]. In the following part, we will focus on the application of the locate-then-edit approach to the security domain." + }, + { + "type": "list", + "bbox": [ + 0.072, + 0.373, + 0.492, + 0.737 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.738, + 0.493, + 0.945 + ], + "angle": 0, + "content": "Attack. Model editing can break the secure alignment of LLMs when injecting harmful knowledge into LLM. Chen et.al [425] first proposed the concept of editing attack, constructing a dataset named EDITATTACK, and using editing methods such as RoME [416] and IKE [427] successfully injected harmful, incorrect, and bias information to LLMs. Since model editing modifies the corresponding knowledge in the form of knowledge triples, BadEdit [428] proposes a way to inject triggers using model editing. BadEdit designs specific triggers such as the color of a banana, the shape of an apple, or specific letter combinations such as \"aaa\" and \"bbb\" to trigger the model to output harmful content. 
Building on this basis, Concept-RoT [429] designs a more invisible approach by proposing \\( k_{0} \\) based on the concept" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.923, + 0.154 + ], + "angle": 0, + "content": "of context, and implanting a backdoor against the concept of context by editing the value corresponding to \\( k_{0} \\), thus realizing the effect of the conceptual Trojan horse. In addition, DEPN [430] devised a method to first locate private neurons, and secondly edit the specified private neurons through RoME so that the model outputs sensitive private information." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.156, + 0.924, + 0.259 + ], + "angle": 0, + "content": "Defense. Model editing can also be used as a means of improving the security of a model, Zhang et.al [426] proposed a model editing method named DINM, to localize and detoxify toxic neurons via model editing, making the model less susceptible to jailbreaking. In addition, other studies [422, 431, 432] have explored the use of model editing for blue teams. Model editing methods have made big strides" + }, + { + "type": "table_caption", + "bbox": [ + 0.545, + 0.267, + 0.882, + 0.282 + ], + "angle": 0, + "content": "TABLE 5: Model Editing for attack and defense." + }, + { + "type": "table", + "bbox": [ + 0.507, + 0.286, + 0.924, + 0.39 + ], + "angle": 0, + "content": "
MethodsAttack?BackDoor?Defense?Parameter?
RoME[416]
IKE[427]--X
AlphaEdit[402]
BadEdit[428]X
ConceptROT[429]X
DEPN[430]XX
DINM[426]XX
PEM[432]XX
" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.399, + 0.923, + 0.503 + ], + "angle": 0, + "content": "in red team, making them an effective means of injecting risk content into safely aligned models. We summarize the mainstream editing for attacks and defenses in Table 5 and each row in the table represents distinct included content.. Against model editing attacks, no research has been done to make a specific defense against such attacks, so further exploration in this area is an important research topic." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.516, + 0.698, + 0.531 + ], + "angle": 0, + "content": "5.2 Safety in Unlearning" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.535, + 0.924, + 0.899 + ], + "angle": 0, + "content": "LLMs have demonstrated remarkable capabilities in various tasks, but their training on vast and often unfiltered datasets from the Internet inevitably leads to the absorption of unsafe information [433, 434, 435, 436, 437, 438]. This includes biases [439], stereotypes [440], toxic language [441], misinformation [442, 443, 444], and even private data [71]. Therefore, LLM unlearning is crucial for ensuring their safe and responsible deployment [406, 445], as shown in Figure 6. Unlearning, in this context, refers to the process of selectively removing or mitigating the influence of specific knowledge, behaviors, or data points from a trained LLM [446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456]. Unlearning methods can be distinguished into two broad paradigms [457]: exact (certified) unlearning and heuristic (approximate) unlearning. Exact methods accurately identify poisoned data points or affected parameters, providing formal or statistical guarantees that the specified behaviors no longer influence the model. This typically requires certified retraining from scratch, removing the disallowed data entirely [458]. 
Two primary paradigms have emerged to achieve approximate unlearning: parameter-adjusting methods, which modify the model's internal weights, and parameter-preserving methods, which intervene externally without altering the core model architecture (refer to Figure 6)." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.899, + 0.923, + 0.943 + ], + "angle": 0, + "content": "Parameter-Adjusting Unlearning. The first paradigm, which involves adjusting the model's parameters, is characterized by its direct intervention in the model's internal" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "16" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.054, + 0.493, + 0.491 + ], + "angle": 0, + "content": "structure. This approach typically requires retraining or fine-tuning the model on a curated dataset, designed to counteract the unsafe knowledge or behavior that needs to be unlearned. It also encompasses methods that follow a locate-then-edit pipeline, where specific parameters associated with the target knowledge are identified and directly modified to achieve unlearning [456]. Techniques such as Gradient Ascent [459] and its variations [460] are commonly employed. While traditional fine-tuning using cross-entropy loss is prevalent, more specialized loss functions have been proposed to enhance the control over the outputs of unlearned models, such as KL minimization [461, 462, 463] and the IDK loss function [464]. Additionally, recent work [465] has reframed LLM unlearning as a preference optimization problem [466], utilizing Negative Preference Optimization loss to improve the unlearning process. 
In contrast to these training-intensive approaches, LaW [456] draws inspiration from model editing by identifying and removing knowledge associations embedded in MLP weights, aiming to eliminate targeted information with minimal impact on the model's overall capabilities. Given the recent powerful multimodal perception and generation nature of LLMs, MMUnlearner [467] proposes to reformulate the setting of multimodal unlearning, which aims at erasing the unwanted visual concept but still preserving textual knowledge. Based on existing multimodal LLM-based unlearning benchmarks [468, 469, 470], SafeEraser [471] further incorporates unlearning mechanism and evaluation into multimodal LLM safety, via introducing Prompt Decouple Loss and a new metric called Safe Answer Refusal Rate." + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.506, + 0.49, + 0.741 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.085, + 0.748, + 0.482, + 0.78 + ], + "angle": 0, + "content": "Fig. 6: The taxonomy illustration of LLM Unlearning for safety." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.797, + 0.493, + 0.945 + ], + "angle": 0, + "content": "Parameter-Preserving Unlearning. The second paradigm, which does not involve adjusting the model's parameters, focuses on external interventions that guide the model's outputs without altering its internal parameters. Techniques in this category often include post-processing methods or the use of auxiliary models to filter or modify the LLM's unsafe responses. Editing-based techniques [430, 472, 473, 474] modify specific components of the model architecture or introduce additional modules to counteract unwanted knowledge. Task vector approaches [475, 476]" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.925, + 0.14 + ], + "angle": 0, + "content": "leverage the geometric properties of the parameter space to identify and neutralize directions associated with targeted information. 
More recently, in-context learning strategies [477, 478] have emerged, which guide the LLM's behavior through carefully crafted prompts rather than weight modifications." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.142, + 0.925, + 0.201 + ], + "angle": 0, + "content": "Although heuristic methods are far more scalable, their guarantees are only empirical. Closing this gap between certified safety and practical feasibility remains a central research challenge for the field." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.226, + 0.726, + 0.241 + ], + "angle": 0, + "content": "5.3 Roadmap & Perspective" + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.247, + 0.657, + 0.262 + ], + "angle": 0, + "content": "5.3.1 Model Editing" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.267, + 0.924, + 0.442 + ], + "angle": 0, + "content": "The evolution of model editing traces back to localized factual updates (e.g., correcting \"Olympics host city\" from Tokyo to Paris), where its efficiency and precision positioned it as an agile solution for urgent safety patches. Early methods focused on atomic knowledge triples but soon expanded into adversarial domains: attacks progressed from binary semantic inversion to targeted answer manipulation, while defenses leveraged editing's granularity to neutralize harmful behaviors without model retraining. Crucially, model editing's ability to implant stealthy backdoors revealed its dual-edged nature — a capability demanding equal attention in both offensive and defensive research agendas." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.442, + 0.924, + 0.661 + ], + "angle": 0, + "content": "In the era of sophisticated safety alignment, model editing addresses a critical niche. While safety fine-tuning establishes systematic safeguards through periodic retraining, it struggles with emergent, context-sensitive risks (e.g., geopolitical shifts or cultural updates) that evolve faster than retraining cycles. 
As LLMs scale, the intervals between alignment updates widen, creating safety gaps exacerbated by catastrophic forgetting risks. Model editing bridges these gaps through rapid surgical interventions — executing updates orders of magnitude faster than alignment procedures — by modifying specific unsafe knowledge or concepts, all while preserving general model stability. In summary, while safety fine-tuning remains essential for systematic alignment, model editing addresses four fundamental limitations in the current era:" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.665, + 0.923, + 0.695 + ], + "angle": 0, + "content": "- Temporal Agility: Mitigates emergent, unpredictable safety risks that cannot wait for full retraining cycles." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.695, + 0.923, + 0.754 + ], + "angle": 0, + "content": "- Granular Control: Enables surgical modifications to specific reasoning pathways in large reasoning models (LRMs), correcting flawed chain-of-thought logic without disrupting valid inference patterns." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.754, + 0.923, + 0.798 + ], + "angle": 0, + "content": "- Resource Decoupling: Reduces computational barriers for safety-critical updates, particularly in multimodal settings where traditional retraining costs scale prohibitively." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.797, + 0.923, + 0.944 + ], + "angle": 0, + "content": "- Stable editing: Model editing is an ongoing and iterative process; however, excessive modifications can compromise the model's performance, likely due to the intricate interdependencies among neurons. Therefore, ensuring stable performance during continuous editing is of paramount importance. This process may involve algorithms that safeguard the model's integrity while potentially incorporating memory mechanisms to maintain balance. 
In summary, altering the original model parameters is a relatively \"risky\" endeavor, and plug-and-play externals" + }, + { + "type": "list", + "bbox": [ + 0.506, + 0.665, + 0.923, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "17" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.054, + 0.49, + 0.082 + ], + "angle": 0, + "content": "nal modules may emerge as the predominant approach in the future." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.086, + 0.49, + 0.117 + ], + "angle": 0, + "content": "Future frontiers highlight model editing's unique value proposition. Specifically," + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.118, + 0.49, + 0.175 + ], + "angle": 0, + "content": "- More Hidden Backdoor: By precisely modifying targeted parameters without perturbing unrelated knowledge, edited backdoors evade traditional detection methods that monitor broader model behavior." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.177, + 0.49, + 0.234 + ], + "angle": 0, + "content": "- Multimodal Safety: In multimodal systems, editing reduces the computational burden of aligning heterogeneous data streams by selectively modifying cross-modal attention mechanisms." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.235, + 0.49, + 0.293 + ], + "angle": 0, + "content": "- Concept-Level Safety: Directly edit abstract safety concepts (e.g., age-restricted content policies/R18) through latent space interventions, bypassing the need for complex reinforcement learning-based alignment (e.g., DPO)." 
+ }, + { + "type": "text", + "bbox": [ + 0.074, + 0.293, + 0.49, + 0.35 + ], + "angle": 0, + "content": "- Interpretability-driven Safety: The model editing's interpretability dimension further provides causal insights into safety-critical model behaviors, informing robust verification frameworks." + }, + { + "type": "list", + "bbox": [ + 0.074, + 0.118, + 0.49, + 0.35 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.354, + 0.491, + 0.471 + ], + "angle": 0, + "content": "Critically, model editing complements - rather than replaces - systematic alignment, forming a hybrid governance paradigm: systematic alignment ensures broad ethical guardrails, while model editing enables surgical adaptations to emerging threats, i.e., establishing a closed-loop governance system for sustainable safe deployment. Together, they will form the twin pillars of LLM safety in the future." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.487, + 0.206, + 0.502 + ], + "angle": 0, + "content": "5.3.2 Unlearning" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.505, + 0.491, + 0.665 + ], + "angle": 0, + "content": "The concept of machine unlearning has evolved from a specialized issue in traditional machine learning to a key aspect of responsible AI governance for LLMs. Early efforts in unlearning primarily focused on removing data from smaller, more specialized models, often in response to privacy regulations such as the GDPR's \"right to be forgotten\" [446]. However, with the advent of LLMs—trained on vast, diverse, and often uncontrolled datasets—the landscape of machine unlearning has undergone significant transformation. This shift has introduced new challenges and imperatives that were previously unforeseen." 
+ }, + { + "type": "text", + "bbox": [ + 0.072, + 0.666, + 0.493, + 0.945 + ], + "angle": 0, + "content": "The initial phase of LLM unlearning focused on adapting existing techniques—primarily parameter-adjusting methods like gradient ascent [459] and fine-tuning variants [461, 462, 463, 464, 479]—to the scale and complexity of LLMs. While this phase demonstrated the feasibility of unlearning, it also highlighted several fundamental limitations, such as computational cost [445, 449], catastrophic forgetting [451], and lack of granularity [406]. These limitations have driven the development of more refined approaches, such as parameter-preserving methods [472, 475, 476, 477, 478]. These methods, which utilize techniques like task arithmetic and in-context learning, provide a glimpse of a future where unlearning can be achieved with greater efficiency and precision. The shift to multimodal LLMs has further expanded the scope, necessitating unlearning methods that can address the safety concerns arising from the interaction between different modalities [467, 468, 469, 470, 471]. The current landscape of LLM unlearning can be described as a shift from reactive “data deletion” to proactive “knowledge" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.921, + 0.099 + ], + "angle": 0, + "content": "sculpting.\" We are moving beyond merely removing information to precisely shaping the model's understanding and behavior. This shift is driven by several key insights:" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.105, + 0.923, + 0.178 + ], + "angle": 0, + "content": "- Unlearning as Preference Optimization: By framing unlearning as preference learning, we can align the model's output with desired safety and ethical guidelines, utilizing techniques like Negative Preference Optimization [465, 466] or safety-oriented preference optimization [480]." 
+ }, + { + "type": "text", + "bbox": [ + 0.506, + 0.179, + 0.922, + 0.251 + ], + "angle": 0, + "content": "- The Importance of Context: Since the \"unsafety\" of information is often context-dependent, researchers are developing methods to selectively unlearn behaviors in specific situations while maintaining the model's general capabilities [477, 481, 482, 483]." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.251, + 0.922, + 0.309 + ], + "angle": 0, + "content": "- Multimodal Unlearning: Addressing the fusion of modalities (text, images, audio) presents new challenges in removing unwanted concepts and behaviors both within and across modalities [467, 471, 484]." + }, + { + "type": "list", + "bbox": [ + 0.506, + 0.105, + 0.923, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.316, + 0.922, + 0.345 + ], + "angle": 0, + "content": "Looking ahead, several critical areas are essential for further advancement in the field:" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.352, + 0.922, + 0.425 + ], + "angle": 0, + "content": "- Principled Evaluation Metrics: Robust, standardized benchmarks are necessary to accurately assess unlearning effectiveness and potential side effects. These metrics should move beyond simplistic, easily manipulated measures [450, 476, 485, 486, 487]." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.426, + 0.922, + 0.498 + ], + "angle": 0, + "content": "- Theoretical Foundations: A deeper understanding of the mechanisms behind unlearning in LLMs is needed to develop truly reliable techniques [451, 488]. This includes exploring why unlearning is challenging and how different methods affect internal representations." 
+ }, + { + "type": "text", + "bbox": [ + 0.506, + 0.499, + 0.922, + 0.586 + ], + "angle": 0, + "content": "- Hybrid Approaches: Combining parameter-adjusting methods (for coarse-grained removal) with parameter-preserving techniques (for fine-grained refinement) presents a promising path forward. This aligns with the \"hybrid governance paradigm\" from Model Editing, allowing for both broad and precise interventions." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.586, + 0.922, + 0.702 + ], + "angle": 0, + "content": "- Unlearning for Interpretability: Instead of using interpretability solely to guide unlearning, the unlearning process itself can be used to enhance our understanding of model behavior [489]. By selectively removing knowledge and observing the consequences, we gain causal insights into the model's reasoning. This represents a fundamentally different and more powerful use of unlearning—this is the key \"dry goods\" insight." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.702, + 0.922, + 0.834 + ], + "angle": 0, + "content": "- Unlearning Benchmark: Building upon the aforementioned insight, it is evident that unlearning currently lacks a standardized benchmark. Establishing a method to effectively balance a model's ability to forget while systematically ensuring its performance remains reliable is crucial (Figure 7). In the realm of multimodal learning, creating such a benchmark could be even more complex, potentially representing a pivotal step in advancing this field [471, 490, 491, 492, 493]." + }, + { + "type": "list", + "bbox": [ + 0.506, + 0.352, + 0.922, + 0.834 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.84, + 0.922, + 0.945 + ], + "angle": 0, + "content": "In conclusion, LLM unlearning is not merely a technical challenge; it is a fundamental requirement for building trustworthy and beneficial AI systems or even agent ecosystems [494, 495]. 
It is evolving from a reactive measure to a proactive design principle, shaping the very foundations of how LLMs learn, adapt, and interact with the world. The journey from \"forgetting\" to \"knowledge sculpting\"" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.924, + 0.044 + ], + "angle": 0, + "content": "18" + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.058, + 0.49, + 0.288 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.298, + 0.493, + 0.356 + ], + "angle": 0, + "content": "Fig. 7: We define the goal of unlearning as maximizing both model utility and forget quality, meaning that algorithms positioned closer to the top-right corner are considered more reliable." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.383, + 0.492, + 0.414 + ], + "angle": 0, + "content": "is underway, promising a future where LLMs can be both powerful and aligned with human values [496, 497, 498]." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.433, + 0.411, + 0.449 + ], + "angle": 0, + "content": "6 LLM(-AGENT) DEPLOYMENT SAFETY" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.452, + 0.493, + 0.629 + ], + "angle": 0, + "content": "In this section, we focus on the safety of LLM and LLM-agent during the deployment phase, addressing three progressively broader dimensions: LLM Safety (Section 6.1), Single-agent Safety (Section 6.2), and Multi-agent Safety (Section 6.3). We begin by discussing the potential threats and defense mechanisms associated with the foundational LLM during inference. Subsequently, we explore the additional security risks introduced by supplementary modules, which impact both individual agents and multi-agent systems. 
This structured approach ensures a comprehensive understanding of safety challenges at varying scales of LLM(-agent) deployment." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.647, + 0.258, + 0.663 + ], + "angle": 0, + "content": "6.1 Deployment Safety" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.665, + 0.493, + 0.943 + ], + "angle": 0, + "content": "The deployment of a single LLM introduces significant security challenges, including adversarial attacks, data privacy risks, and content integrity concerns. This subsection systematically examines these issues by first analyzing key attack vectors (Subsection 6.1.1), such as model extraction, membership inference, jailbreak attacks, prompt injection, data extraction, and prompt stealing, which threaten model confidentiality, robustness, and ethical compliance. Next, we explore defensive mechanisms (Subsection 6.1.2), including input preprocessing, output filtering, robust prompt engineering, and system-level security controls aimed at mitigating these threats. Finally, we discuss evaluation and benchmarking (Subsection 6.1.3), covering robustness, content safety, privacy leakage, multi-modal safety, and standardized security benchmarks, ensuring a comprehensive assessment of LLM deployment safety. This structure follows a logical progression from identifying threats to implementing defenses and establishing reliable evaluation methodologies." + }, + { + "type": "title", + "bbox": [ + 0.506, + 0.054, + 0.71, + 0.069 + ], + "angle": 0, + "content": "6.1.1 Attack in Deployment" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.075, + 0.866, + 0.09 + ], + "angle": 0, + "content": "We first give an overview of the attacks in Figure 8." + }, + { + "type": "image", + "bbox": [ + 0.51, + 0.108, + 0.921, + 0.269 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.278, + 0.922, + 0.308 + ], + "angle": 0, + "content": "Fig. 
8: The overview of attacks in single LLM's deployment phase." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.324, + 0.924, + 0.572 + ], + "angle": 0, + "content": "Model Extraction Attacks. Model extraction attacks aim to steal a deployed language model, which only provides an Application Programming Interface (API) that processes text input (i.e., a prompt) and returns generated outputs. He et al. and Peng et al. [499, 500, 501, 502] made a series of early efforts in launching model extraction or stealing attacks against LLMs (even deployed as a service) and proposed various defense mechanisms to mitigate such risks. Carlini et al. [503] conducted the model-stealing attack against a black-box large language model by targeting its embedding projection layer. Building on this, Finlayson et al. [504] further investigated the risk of stealing embedding dimensions by exploiting the softmax bottleneck. Another line of research explores model extraction in a gray-box setting. For instance, Zanella et al. [505] demonstrated the feasibility of stealing high-fidelity language models when given access to a frozen or fine-tuned encoder." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.573, + 0.923, + 0.734 + ], + "angle": 0, + "content": "Another category of model extraction attacks focuses on recovering the full weight of an LLM. For instance, Horwitz et al. [506] successfully reconstruct a pre-fine-tuned LLM (i.e., the pre-trained model before fine-tuning) using its fine-tuned variants, such as low-rank adaptation (LoRA) models. Beyond general model-stealing attacks, some research explores threats to specialized capabilities. Li et al. [507] extract the coding abilities of an LLM, including code synthesis and translation. Additionally, Liu et al. [508] propose a theoretically grounded method for stealing any low-rank language model." 
+ }, + { + "type": "text", + "bbox": [ + 0.504, + 0.734, + 0.922, + 0.779 + ], + "angle": 0, + "content": "Membership Inference Attacks. Membership Inference Attack (MIA) tries to figure out whether a given candidate is included in the training dataset of an LLM [117, 509]." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.782, + 0.924, + 0.945 + ], + "angle": 0, + "content": "Methods. [509] propose the first MIA with MIN-K% PROB, which identifies examples that contain few outlier words with low probabilities as non-members. Afterward, [510] propose MIN-K%++, which simulates the membership inference into identifying local maxima. Some works reveal that the success of MIAs against LLMs may be due to sampling non-members from different distributions. Thus, [511] propose Blind attack, which conducts MIA by applying a threshold and completely ignores the target model. [512] selectively combine the existing MIAs and aggregate their scores to perform a statistical test. [513]" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.924, + 0.044 + ], + "angle": 0, + "content": "19" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.054, + 0.493, + 0.199 + ], + "angle": 0, + "content": "identify the membership of a verbatim text by constructing paraphrased options (with another proxy model) and asking the target LLM for true verbatim. [514] examine the relative change in conditional log-likelihoods when prefixing target data points with non-member context. [515] propose to generate noisy neighbors for a target sample by adding stochastic noise in the embedding space. [516] train a neural network to capture variations in output probability distributions between members and non-members." 
+ }, + { + "type": "text", + "bbox": [ + 0.068, + 0.2, + 0.493, + 0.346 + ], + "angle": 0, + "content": "\\(\\nRightarrow\\) Document-level MIAs. Some works focus on document-level MIAs. Meeus et al. [517] propose the first MIA for document-level leakage, which contains four steps: retrieving, normalizing, aggregating, and predicting. After that, Meeus et al. [518] validate that it doesn't work against models that do not naturally memorize and propose to utilize copyright traps to detect the use of copyrighted materials. Puerto et al. [519] make exploration toward collection-level MIA against LLMs by computing features and two-stage aggregation." + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.346, + 0.495, + 0.667 + ], + "angle": 0, + "content": "Different Settings. Some works also explore the MIA risk in novel settings. Anderson et al. [520] propose the first MIA against Retrieval Augmented Generation (RAG) systems by directly asking whether one candidate is its member or not. Li et al. [521] compare the output semantic similarity of the sample for the RAG system and the remaining to determine the membership of RAG's database. Zhang et al. [522] propose the first MIA against in-context learning and four attack methods, including GAP, Inquiry, Repeat, and Brainwash. Meanwhile, Duan et al. [523] reveal that MIA risk in in-context learning is more severe than in the fine-tuning setting. Wen et al. [524] conduct membership inference of fine-tuning data by poisoning pretraining data and backdoorsing the pre-trained model. Then Wen et al. [525] comprehensively assess the MIA risk against adaptation methods, including LowRank Adaptation (LoRA), Soft Prompt Tuning (SPT), and In-Context Learning (ICL). Balloccu et al. [526] study the indirect data contamination for closed-source LLMs, which can also be regarded as MIA. Fu et al. [527] propose Self-calibrated Probabilistic Variation, which fine-tunes the reference model by prompting the target LLM." 
+ }, + { + "type": "text", + "bbox": [ + 0.068, + 0.666, + 0.495, + 0.812 + ], + "angle": 0, + "content": "\\(\\nRightarrow\\) Factor Impact. Duan et al. [117] find that the existing MIAs work poorly on LLM due to massive training data and near-one epoch training. Li et al. [528] clarify the impact of fine-tuning and evaluation metrics and propose a three-phase framework (i.e. training, simulation, and confidence calculation) to assess membership leakage. Kandpal et al. [87] find that duplication of training data highly extends the risk of MIA. Naseh et al. [529] validate that using synthetic data in membership evaluations may lead to false classification results." + }, + { + "type": "list", + "bbox": [ + 0.068, + 0.2, + 0.495, + 0.812 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.826, + 0.493, + 0.945 + ], + "angle": 0, + "content": "Jailbreak Attacks. Jailbreak attacks aim to induce the large language model to generate unsafe content like violence [260]. Jailbreak attacks focus on bypassing the safety rules, including system safety prompts and safety filters, while prompt injection attacks target all system prompts. Lots of literature have studied the vulnerability of LLM, where different terms, including \"jailbreak attack\" and \"redteaming\", all point to the same safety vulnerability that" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.923, + 0.083 + ], + "angle": 0, + "content": "generates unsafe content. We classify them into two main categories, i.e. optimization-based and strategy-based." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.083, + 0.923, + 0.257 + ], + "angle": 0, + "content": "Strategy-based jailbreaks figure out novel strategies or templates to generate one adversarial prompt at a heat to test LLMs' vulnerabilities, which are pre-defined. Thus, the generated prompt is non-evolvable. 
Specifically, useful strategies include persuasion [559], role-playing [560, 561, 562, 563], cipher [564, 565], ASCII [566], long-context [567], low-resource language [568, 569], in-context malicious demonstration [570], overloaded logical thinking [571], misspelling [572], multi-language mixture [573], rephrasing [538, 574, 575, 576], competing objectives and generalization mismatch [577], splitting sub-queries [578], zero-shot generation [579], personal modulation [580]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.257, + 0.923, + 0.315 + ], + "angle": 0, + "content": "Optimization-based jailbreaks contain a multi-step optimization process to revise one unsafe prompt. Here, we further divide the optimization-based jailbreaks into gradient-based and LLM-based ones:" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.316, + 0.924, + 0.68 + ], + "angle": 0, + "content": "Gradient-based Optimization. GCG [260] appends one suffix to the target prompt, then utilizes the gradient of loss, which is calculated with the target (e.g., \"Sure\" or \"Yes\") and output, to optimize the soft prompt. Then, it greedily searches the best-matched tokens in the dictionary for soft prompt replacement. AutoDAN-B [535] solves the limited readability of GCG by constructing a proxy score where the perplexity is considered, which is utilized for greedy sampling. I-GCG [531] improves GCG by appending a template before the suffix and uses a multi-coordinate updating strategy and easy-to-hard initialization to optimize the suffix. COLD-Attack [581] adapts Energy-based Constrained Decoding with Langevin Dynamics for controllable adversarial prompt generation. MA-GCG [532] proposes momentum gradient to boost and stabilize the greedy search for tokens in adversarial prompts. A-GCG [533] introduces a smaller draft model than the target model to sample the promising suffix candidates for faster optimization. 
BOOST [582] enhances the existing jailbreak attacks by adding eos tokens to the end of the unsafe prompt. CRT [583] proposes an enhanced reinforcement learning-based jailbreak with consideration of prompt diversity. I-FSJ [584] deploys few-shot learning and demo-level random search." + }, + { + "type": "text", + "bbox": [ + 0.502, + 0.68, + 0.924, + 0.945 + ], + "angle": 0, + "content": "\\(\\Rightarrow\\) LLM-based Optimization. PAIR [261] constructs a system prompt and uses an attacker LLM to generate and revise adversarial prompts. It also uses a Judge model to assess the feedback from the victim, which is further utilized for revising the adversarial prompt. AutoDAN-A [534] utilizes crossover strategies and LLM-based mutation to revise adversarial prompts into stealthy sentences. AntoDAN-Trubo [539] AutoDAN-Turbo proposes to find useful strategies by prompting an LLM automatically. ToA (Tree of Attack) [536] iteratively uses an LLM to transform the unsafe prompt into two variations and keeps the prompt variation that achieves a higher score. Xiao et al. [585] adopt a similar pipeline with PAIR [261] and introduce malicious content concealing and memory reframing. Puzzler [586] proposes defensive and offensive measures to conduct an indirect jailbreak. GPT-FUZZER [587] starts from human-written prompts, and uses templates and mutation to rewrite unsafe prompts." + }, + { + "type": "list", + "bbox": [ + 0.502, + 0.316, + 0.924, + 0.945 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "20" + }, + { + "type": "table_caption", + "bbox": [ + 0.075, + 0.089, + 0.923, + 0.12 + ], + "angle": 0, + "content": "TABLE 6: A summary of attacks for LLM after deployment. 
Our evaluation includes representative studies that exemplify these security aspects. More details can be found in the main text. OS indicates whether the code is open-sourced." + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.127, + 0.923, + 0.904 + ], + "angle": 0, + "content": "
AttacksMethodOSYearStrategySettingDatasetsTarget ModelsMetrics
Model ExtractionCarlini et al. [503]Yes2024Binary SearchBlack-boxNoneGPTs, LLaMA, Pythia,ada, cabbageQuery&TokenCost, MSE, RMS
Finlayson et al. [504]No2024Softmax BottleneckBlack-boxNonePythia, GPT-3.5Query Cost
Zanella et al. [505]No2024Matrix OperationsGrey-boxSST-2, MNLI, AGNewsBERTs, XLNetQuery Cost, Acc,Agreement
Horwitz et al. [506]Yes2024Spectral DeTuningWhite-boxLoWRAViT, SD, MistralMSWE, SEM
Membership InferenceMIN-K% PROB [509]Yes2023ProbabilitiesBlack-boxWikipediaLLaMAs, Pythia, NeoX,OPTTPR, FPR, ROC,AUC
MIN-K%++ [510]Yes2022Local MaximaBlack-boxWikiMIA, MIMIRPythia, GPT-NeoXLLaMA, OPT, MambaAUROC, TPR, FPR
Blind [511]Yes2024ThresholdBlack-box8 setsGPT-3, OpenLLaMAAUC ROC
LLM-DI [512]Yes2024AggregationBlack-boxPILEPythiasAUC, p-values
DE-COP [513]Yes2024ParaphrasesBlack-boxarXiv:Tection, BookTectionMistral, Mixtral, LLaMA, GPTs, ClaudeAUC
Recall [514]Yes2024Log-LikelihoodsBlack-boxWikiMIA, MIMIRPythia, GPT-NeoXLLaMA, OPT, MambaAUC, TPR@FPR
Noisy [515]No2024Embedding NGBRsGray-boxOpenWebText,WikipediaGPT-2TPR, FPR, AUC
SMIA [516]No2024PerturbationGray-boxWikipedia, FANPythia, Pythia-Deduped, GPT-NeosAUC-ROC, TPR, FPR
FEATAGG [517]No2024Feature AggregationBlack-boxProjectGutenberg,ArXivOpenLLaMATPR@FPR, AUC
RAG-MIA [520]No2024Direct AskingBlack-boxHealthCareMagic,Enronflan, llama, mistralTPR@FPR, AUC-ROC
JailbreakGCG [260]Yes2023Gradient-basedWhite-boxVicuna, LLaMA-2AdvBenchASR, Loss
AmpleGCG [530]Yes2024Hybrid-basedWhite-boxVicuna, Llama-2,Mis-tral,GPTsAdvBenchASR, US, Diver-sity, Time
I-GCG [531]Yes2024Gradient-basedWhite-boxAdvBench,HarmBenchVICUNA, GUANACOLLAMA, MISTRALASR
MA-GCG [532]Yes2024Gradient-basedWhite-boxAdvBenchVicuna, MistralASR, Time
A-GCG [533]Yes2024Gradient-basedWhite-boxAdvBenchLlama2, VicunaASR, Acc
AutoDAN-A [534]Yes2023LLM-basedBlack-boxAdvBenchVicuna, MistralASR, Recheck,PPL
AutoDAN-B [535]Yes2023Gradient-basedWhite-boxAdvBenchVicuna, Guanaco, PythiaASR, Recheck
PAIR [261]Yes2023LLM-basedBlack-boxJailbreakBenchVicuna, Llama-2, GPTs,Claudes,GeminiASR, QPS
ToA [536]Yes2023LLM-basedBlack-boxAdvBench, Harm123Vicuna, Llama-2, PaLM-2,GPTs, Claude3, GeminiGPT4-MetricHuman-Judge
PAL [537]Yes2024LLM-basedBlack-boxAdvBenchLlama-2, GPT-3.5ASR, Manual Labeling
Masterkey [538]No2023RephrasingBlack-boxAdvBench, Harm123GPTs, Bing, BardASR, QSR
AutoDAN-Turbo [539]Yes2024LLM-basedBlack-boxHarmbenchLlama-2, Gemma, GPT-4,GeminiASR, StrongRE-JECT
FlipAttack [540]Yes2025RephrasingBlack-boxAdvBench, StrongRE-JECTGPTs, Claude 3.5 Sonnet, Llama 3.1 405B, Mixtral 8x22BASR
Geneshift [541]Yes2025LLM-basedBlack-boxAdvBenchGPTsASR
Prompt InjectionIPP [542]Yes2022HandcraftBlack-boxOpenAI Examplestext-davinciASR
Greshake et al. [543]Yes2023Data PoisoningBlack-boxNonetext-davinci, GPT-4None
HOUYI [544]Yes2023Components AsmblBlack-boxFive QueriesSUPERTOOLSManual
Yan et al. [130]Yes2023PoisoningBlack-boxSeveral CasesAlpacaNgt, Pst, Ocrc
TT [545]No2023GameBlack-boxTensor TrustGPTs, Claudes, PaLM, LLaMAsRobustness Rate
JudgeDeceiver [546]Yes2024Gradient-basedWhite-boxMT-Bench, LLMBarMRPC, Jfleg, HSOL,RTE, SST2, SMSMistral, Openchat, LlamasACC, ASR, PACKEY-E, LM-E
AUPI [547]Yes2024Gradient-basedWhite-boxMRPC, Jfleg, HSOL,RTE, SST2, SMSLlama2ASR
AUTOHIJACKER [548]No2024LLM-basedBlack-boxAgentDojo, OPILlama, Command-R,GPTsASR
Data Extractionzlib [108]Yes2020Generate & InferenceBlack-boxTop-n, Temperature, InternetGPT-26 metrics
AutoSklearn [549]No2023Greedy, Contrastive, Beam decodingBlack-boxPileGPT-NeoPrecision, Recall,R@FPR
DECOM [550]No2024DecompositionBlack-boxNYT, WSJFrontiersTRM, EMP,BITAP
Context [551]No2022Context, Zero-shot,Few-shotBlack-boxEnron CorpusGPT-NeoAcc
ETHICIST [552]Yes2023Prompt TuningGray-boxLM-ExtractionGPT-NeoRecall
Pli-compass [553]No2024GroundingBlack-boxEnron emailGPT-JExtraction Rate
DSP [554]No2024Dynamic Soft PromptingBlack-boxLMEB, The StackGPT-Neo, Pythia, Star-CoderBaseEER, FER, PPL
PWB [555]Yes2024Gradient-basedWhite-boxPilePythia, LlamaPrecision, AUC,TPR
Prompt StealingSha et al. [556]No2024LLM-basedBlack-boxRetrievalQA,AlpacaGPT4ChatGPT, LLaMAAcc, Precision, Recall, AUC
output2prompt [557]Yes2024LLM-basedBlack-box3 User & 3 SystemPromptsLlamas, GPTsBLEU, CS, Preci-sion, Recall
PRSA [558]No2024Output DifferenceBlack-boxCategory18GPTsBLEU, FastKAS-SIM, JS
" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.921, + 0.043 + ], + "angle": 0, + "content": "21" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.053, + 0.491, + 0.112 + ], + "angle": 0, + "content": "ECLIPSE [588] uses an LLM as a suffix generator and optimizer. PAL [537] proposes an online proxy model (which is used for adversarial prompt generation) training pipeline." + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.112, + 0.492, + 0.215 + ], + "angle": 0, + "content": "\\(\\Rightarrow\\) Others. EnJa [589] proposes to ensemble prompt and token-level attack methods via a template-based connector. AmpleGCG [530] first collects lots of successful suffixes and then trains the generative model to generate a specific suffix for a given unsafe prompt. Zhao et al. [590] targets the scenario where the decoding process of target LLM is assisted with smaller models' guidance." + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.221, + 0.492, + 0.324 + ], + "angle": 0, + "content": "Prompt Injection Attacks. Prompt injection is a vulnerability where an attacker manipulates the input prompts of LLMs to force them to generate a specific output, which is usually out of the range for normal use (e.g., goal hijacking and prompt leaking [542]), often by injecting malicious text or commands into the input field. Attackers can employ a variety of techniques to carry out such attacks." + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.33, + 0.493, + 0.593 + ], + "angle": 0, + "content": "\\(\\Rightarrow\\) Direct Prompt Injection. Perez et al. [542] directly inject handcrafted adversarial prompts into inputs to misalign the language model. HOUYI [544] proposes an injection generation framework which includes three components. Yan et al. 
[130] utilize LLMs to generate diverse trigger instructions that implicitly capture the characteristics of trigger scenarios. TENSOR TRUST leverages the TENSOR TRUST web game to generate a large-scale dataset and benchmark [545]. AUPI [547] adopts a gradient-based optimization method, specifically, a momentum-enhanced optimization algorithm, to generate universal prompt injection data. Upadhayay et al. [591] argue that LLMs suffer from cognitive overload and propose to use in-context learning to jailbreak LLMs through deliberately designed prompts that induce cognitive overload. Kwon et al. [592] circumvent security policies by substituting sensitive words—likely to be rejected by the language model—with mathematical functions." + }, + { + "type": "text", + "bbox": [ + 0.068, + 0.593, + 0.493, + 0.884 + ], + "angle": 0, + "content": "\\(\\nLeftrightarrow\\) Indirect Prompt Injection. Greshake et al. [543] propose to indirectly inject prompts into the data that are likely to be retrieved. Bagdasaryan et al. [593] design a prompt injection attack against multi-modal LLMs, by generating an adversarial perturbation corresponding to the prompt and blending it into an image or audio recording. Neural Exec [594] designs a multi-stage preprocessing pipeline for cases like Retrieval-Augmented Generation (RAG)-based applications. PoisonedAlign [595] boosts the success of prompt injection attacks by strategically creating poisoned alignment samples in the LLM's alignment process. TPIA [596] crafts non-functional perturbations that contain malicious information and inserts them into the victim's code context by spreading them into potentially used dependencies like packages or RAG's knowledge base. F2A [597] proposes to use feign security detection agents to bypass the defense mechanism of LLMs. AUTOHIJACKER [548] uses a batch-based optimization framework to handle sparse feedback and leverages a trainable memory to enable effective generation." 
+ }, + { + "type": "text", + "bbox": [ + 0.068, + 0.884, + 0.492, + 0.944 + ], + "angle": 0, + "content": "Different Settings. JudgeDeceiver uses gradient-based optimization to inject LLM-as-a-Judge scenarios [546]. Pedro et al. [598] study the risk of injections targeting web applications based on the Langchain framework. Lee et" + }, + { + "type": "list", + "bbox": [ + 0.068, + 0.33, + 0.493, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.053, + 0.923, + 0.157 + ], + "angle": 0, + "content": "al. [599] propose a human-AI collaborative framework to explore the potential of prompt injection against federated military LLMs. PROMPT INFECTION [600] proposes to make malicious prompts self-replicate across interconnected agents in multi-agent systems. Zhang et al. [601] explore the risk of prompt injection in LLM-integrated systems like LLM-integrated mobile robotic systems." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.163, + 0.924, + 0.235 + ], + "angle": 0, + "content": "Data Extraction Attacks. Data extraction attacks try to figure out the personally identifiable information (PII) that is used to train the LLMs [108]. It starts from sufficient-length prefixes to perform extraction and additional measures to determine if extracted texts are valid." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.243, + 0.924, + 0.606 + ], + "angle": 0, + "content": "\\(\\nLeftrightarrow\\) Methods. In the beginning work [108], the proposed extraction process contains two stages \"generate-then-rank\": sampling potentially memorized examples and membership inference. It proposes a temperature-decaying method to sample more diverse examples and use surrogate models to infer the membership. After that, Al-Kaswan et al. [549] propose using greedy, contrastive, and beam decoding strategies to generate examples and use a classifier to infer the membership. Su et al. 
[550] propose an instruction decomposition technique to extract fragments of training data gradually. Huang et al. [551] extensively explore the effect of context, zero-shot, and few-shot methods in extracting the personal email address. ETHICIST proposes a smoothing loss and a calibrated confidence estimation method to extract the suffix and measure the confidence [552]. Nakka et al. [553] improves the extraction performance by grounding the prefix of the manually constructed extraction prompt with in-domain data. Wang et al. [554] propose to train a transformer-based generator to produce dynamic, prefix-dependent soft prompts. Ozdayi et al. [105] introduce an approach that uses prompt tuning to control the extraction rates of memorized content. Meng et al. [602] propose a two-stage method, i.e., collection and ranking, to recover PPI when PII entities have been masked." + }, + { + "type": "text", + "bbox": [ + 0.502, + 0.608, + 0.924, + 0.927 + ], + "angle": 0, + "content": "Different Settings. Some works also explore the risk of data leakage in novel settings. Wang et al. [555] study the probability of data extraction in fine-tuning settings and Bargav et al. [603, 604] extract the training data by comparing the output difference before and after the fine-tuning. Jiang et al. [605, 606, 607] propose to extract the private Retrieval-Augmented Generation (RAG) documents. Peng et al. [608] extract the private RAG documents by poisoning in the fine-tuning process. Nasr et al. [107] explore the potential risk of data extraction for the aligned production language models. Panda et al. [609] extract the fine-tuning secret data by poisoning the pertaining dataset. Lu et al. [610] propose to extract PII from an aligned model with model merging. Chen et al. [611] find that fine-tuning can recover the forgotten PIIs in pretraining data. Panchendrarajan et al. [612] propose to extract the whole private training data in the fine-tuning process. Rashid et al. 
[613] propose selective weight tampering to explore PPI leakage in Federated Language Models. Dentan et al. [614] extract data from layout-aware document understanding models like unimodal or bimodal models." + }, + { + "type": "text", + "bbox": [ + 0.501, + 0.928, + 0.923, + 0.944 + ], + "angle": 0, + "content": "Different Applications. Leveraging the abnormally high" + }, + { + "type": "list", + "bbox": [ + 0.501, + 0.243, + 0.924, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "22" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.054, + 0.493, + 0.287 + ], + "angle": 0, + "content": "token probabilities, some works utilize the memorization of LLMs to extract the fingerprint or steganography [615]. Al-Kaswan et al. [616] explore memorization in large language models for code and find that code models memorize training data at a lower rate than natural language models. Nie et al. [617] utilize the token-level features derived from the identified characteristics to decode the PII. Lehman et al. [618] reveal the risk of Electronic Health Records leakage of LLMs. Diera et al. [619] conduct experiments to assess the PII leakage of fine-tuned BERT models and found that Differential Privacy (DP) has a negative effect when deployed in fine-tuning. Zhang et al. [620] propose data extraction attacks against text classification with transformers. Huang et al. [621] propose an evaluation tool, i.e. HCR, to assess the PPI leakage in Neural Code Completion Tools." + }, + { + "type": "text", + "bbox": [ + 0.069, + 0.288, + 0.493, + 0.476 + ], + "angle": 0, + "content": "\\(\\nrightarrow\\) Factor Assessment. 
Some work studies the factors of data extraction including decoding schemes, model sizes, prefix lengths, partial sequence leakages, and token positions [622, 623]. Yash et al. [624] explore the effects of prompt sensitivity and access to multiple checkpoints to extraction attacks. Staab et al. [625] construct a dataset consisting of real Reddit profiles to extract personal attributes. Xu et al. [626] conduct experiments to evaluate the factors of different suffix generation methods and different membership inference attacks in extraction performance. Karamolegkou et al. [627] evaluate the effect of model structure, data type, probing strategies, and metrics." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.478, + 0.493, + 0.741 + ], + "angle": 0, + "content": "Prompt Stealing Attacks. Given that crafting effective prompts requires significant engineering effort and can be considered valuable intellectual property (IP), promptstealing attacks aim to compromise this IP by reconstructing prompts from generated responses [556, 557, 558]. These generation effects are often used to attract prospective prospective buyers. Sha et al. [556] pioneer this approach by collecting a dataset and training classifiers to predict prompt parameters—such as whether the prompt is direct, role-based, or in-context. They then used a large language model (LLM) to reconstruct the prompt. Similarly, Zhang et al. [557] trained an LLM on output-prompt pairs to directly infer the original prompt, while Yang et al. [558] leveraged generation differences to refine surrogate prompts. However, recovering the original prompt solely from the output is challenging. Out of this, Zheng et al. [628] propose a timing-based side-channel method to infer the prompt during inference." 
+ }, + { + "type": "title", + "bbox": [ + 0.074, + 0.751, + 0.395, + 0.766 + ], + "angle": 0, + "content": "6.1.2 Defensive Mechanisms in Deployment" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.768, + 0.491, + 0.914 + ], + "angle": 0, + "content": "In Subsubsection 6.1.1, we analyzed various attack scenarios targeting individual LLM deployments. However, in real-world applications, defense mechanisms are not designed as isolated, one-to-one countermeasures against specific attacks. Instead, they follow fundamental security principles to establish a systematic defense framework, as illustrated in Figure 9. This framework integrates multiple layers of protection, ensuring resilience against a wide range of adversarial threats while maintaining model usability and efficiency." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.914, + 0.492, + 0.945 + ], + "angle": 0, + "content": "Input Preprocessing Defenses Input preprocessing serves as the first line of defense in LLM deployment, aiming to" + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.059, + 0.764, + 0.072 + ], + "angle": 0, + "content": "Defensive Mechanisms in Deployment" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.072, + 0.916, + 0.16 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.505, + 0.169, + 0.922, + 0.2 + ], + "angle": 0, + "content": "Fig. 9: The overview of attacks in single LLM's deployment phase." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.226, + 0.922, + 0.254 + ], + "angle": 0, + "content": "detect and neutralize adversarial inputs before they reach the model." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.256, + 0.923, + 0.504 + ], + "angle": 0, + "content": "Attack Detection & Identification: Effective input filtering [629, 630] begins with attack detection [631], which identifies adversarial prompts through statistical [632], structural [633], or behavioral inconsistencies [634]. 
Gradient-based detection methods [635] leverage safety-critical gradient analysis and loss landscape exploration to uncover jailbreak prompts that manipulate LLM behavior. These approaches identify adversarial inputs [636, 637] by analyzing how small perturbations [638] affect model outputs, detecting highly sensitive or misaligned gradients that indicate targeted attacks. Perplexity-based methods [632, 632] measure the probability distribution of input sequences, flagging atypical or low-likelihood prompts as potential adversarial inputs. These techniques are particularly effective in detecting prompt injection and adversarial perturbations, where crafted prompts deviate significantly from natural language distributions." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.505, + 0.923, + 0.621 + ], + "angle": 0, + "content": "Beyond individual heuristics, universal detection frameworks [639] integrate multiple detection strategies to counter diverse attack vectors, including prompt injection [640], backdoor manipulations [641], and adversarial attacks [637]. These frameworks employ ensemble-based filtering mechanisms, combining gradient analysis [642], perplexity estimation [643], and syntactic evaluation for generalized attack resilience." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.622, + 0.923, + 0.87 + ], + "angle": 0, + "content": "Semantic & Behavioral Analysis: Attack detection alone is insufficient, as certain adversarial inputs may bypass traditional filtering mechanisms. Semantic [644] and behavioral analysis enhance input preprocessing by evaluating linguistic intent and model alignment. Self-examination techniques allow LLMs [645, 646] to assess whether they are being manipulated, leveraging auxiliary reasoning steps to detect deceptive prompts. 
Alignment-based verification [647] ensures that the model's responses remain consistent with its safety objectives [330], identifying inputs that subtly nudge the model toward policy violations or ethical misalignment. Intention analysis [648, 649] further refines input filtering by discerning subtle manipulations designed to bypass explicit security checks. Unlike token-level detection, which flags overtly adversarial inputs, intention-aware defenses analyze the semantic structure and purpose of the input to preemptively reject jailbreak attempts." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.87, + 0.922, + 0.945 + ], + "angle": 0, + "content": "Adversarial Defense & Mitigation: When detection and behavioral analysis fail to fully neutralize adversarial inputs, robustness-enhancing techniques [647] mitigate their effects by reducing model susceptibility to manipulation [334, 650]. Semantic smoothing [651, 652] techniques" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "23" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.493, + 0.142 + ], + "angle": 0, + "content": "introduce controlled randomness into LLM responses, reducing the model's sensitivity to adversarial perturbations and preventing reliable jailbreak execution. By stabilizing decision boundaries [653], these methods enhance resistance against prompt manipulation strategies that exploit response predictability." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.142, + 0.493, + 0.258 + ], + "angle": 0, + "content": "Preemptive input transformations [654], such as back-translation [655] or paraphrasing, modify incoming queries [651] while preserving semantic intent, disrupting adversarial structures embedded within malicious prompts. 
Data augmentation [656] and adversarial training further strengthen model robustness by exposing LLMs to adversarial prompts during training, forcing them to learn invariances that reduce their vulnerability to real-world attacks." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.258, + 0.493, + 0.389 + ], + "angle": 0, + "content": "Output Filtering Mechanisms. Output filtering mechanisms [212, 657] serve as a critical safeguard in LLM deployment, ensuring that generated responses comply with safety constraints while preserving informativeness. Unlike input preprocessing, which aims to prevent adversarial prompts from reaching the model, output filtering mitigates harmful content post-generation. Existing approaches primarily follow three paradigms: rule-based constraints, generative adversarial filtering, and toxicity detection." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.39, + 0.492, + 0.549 + ], + "angle": 0, + "content": "Rule-based mechanisms [658] impose predefined constraints on model outputs, preventing the generation of harmful, unethical, or undesired content. Programmable guardrails [659] offer a structured framework where developers can enforce response filtering, topic restriction, and ethical alignment. These methods often integrate reinforcement learning from human feedback [155] or rule-based reward [660] modeling to refine output safety. While effective at handling explicit violations, static rule-based methods struggle with nuanced adversarial prompts and subtle misalignments." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.55, + 0.493, + 0.738 + ], + "angle": 0, + "content": "To address these limitations, generative adversarial filtering [661] leverages self-critique [662, 663], ensemble detection, and dynamic response evaluation [664]. Self-rectification mechanisms [663, 665] enable LLMs to critique their own outputs and refine responses through iterative refinement. 
Additionally, ensemble-based [666] moderation models aggregate predictions from multiple LLMs, improving robustness against circumvention techniques. Adaptive filtering frameworks [667] employ perplexity-based assessments and adversarial perturbation detection to flag responses deviating from expected linguistic patterns, enhancing their resilience against jailbreak attempts [668, 669] and toxic content injection." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.739, + 0.493, + 0.913 + ], + "angle": 0, + "content": "Toxicity detection [670, 671, 672] and content moderation [673, 674, 675, 676] further reinforce output safety by identifying and mitigating hate speech [677], misinformation, and other harmful content. Supervised finetuning adapts LLMs to recognize undesirable patterns, while classifier-based detection models [678] filter responses in real-time. Some approaches introduce debiasing strategies, such as controlled decoding [679, 680] and anti-expert guidance [681], to suppress toxic outputs without sacrificing response diversity. However, these methods face challenges in balancing false positives and false negatives, particularly in ambiguous or context-dependent cases." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.914, + 0.493, + 0.943 + ], + "angle": 0, + "content": "The effectiveness of output filtering hinges on its ability to balance strict control with linguistic flexibility, ensur" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.923, + 0.213 + ], + "angle": 0, + "content": "ing that models remain both safe and practically useful. A hybrid approach combining rule-based safeguards, self-correcting mechanisms, and adaptive toxicity moderation is essential to achieving robust and scalable LLM deployment. Robust Prompt Engineering. 
Robust prompt engineering aims to enhance LLM safety by designing input prompts that resist adversarial manipulation [682], protect sensitive data, and mitigate harmful outputs—all [683] without modifying model parameters. These strategies act at the interaction level, offering lightweight and model-agnostic protection." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.214, + 0.924, + 0.461 + ], + "angle": 0, + "content": "Recent efforts have introduced prompt optimization techniques grounded in adversarial robustness, including embedding-space manipulation and defensive objective alignment. Methods such as Robust Prompt Optimization [684] and Prompt Adversarial Tuning generate transferable suffixes [668] or prefix [685] embeddings to guide model behavior [686] under attack [687], effectively lowering jailbreak success rates while preserving task performance. Similarly, goal prioritization frameworks [688] enforce inference-time objective consistency, dynamically resolving conflict between user instructions and safety constraints without requiring access to malicious samples. Complementary to these strategies, patch-based methods integrate interpretable suffixes or structured self-reminders [689] into prompts, reducing the model's susceptibility to coercive inputs through lightweight, modular defenses." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.462, + 0.924, + 0.55 + ], + "angle": 0, + "content": "Structural manipulation approaches [690] neutralize adversarial intent through prompt rewriting. Spotlighting [691] injects source-attribute signals to counter indirect prompt injection, while inverse prompt engineering [692] repurposes attack data to generate task-specific defensive prompts under the principle of least privilege." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.55, + 0.924, + 0.724 + ], + "angle": 0, + "content": "Privacy-preserving prompt [693] design introduces formal guarantees through differential privacy. 
Approaches like DP-Prompt [694] and stochastic gradient masking [695] reduce information leakage from prompts without harming performance. Desensitization and directional control of incontext representations offer additional privacy protections during prompt construction. Prompt engineering [579, 696] also helps mitigate societal risks. Chain-of-thought prompting and guided templates reduce gender bias [697] in reasoning tasks, while prompt learning [698] improves toxicity detection and generation control [699, 700], often surpassing specialized models in efficiency and generalization." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.724, + 0.923, + 0.812 + ], + "angle": 0, + "content": "Finally, systematic prompt optimization methods [701, 702] aim to generalize prompt robustness across tasks and domains. Techniques like BATPrompt [703] and StraGo [704] use adversarial simulation and strategic decomposition to refine prompts iteratively, improving both resilience and effectiveness under variable inputs." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.812, + 0.924, + 0.943 + ], + "angle": 0, + "content": "System-level Security Controls. System-level defenses [705] enhance LLM deployment by optimizing inference, enforcing alignment, isolating untrusted inputs, and securing the supply chain. Systems like Petals [706], Sarathi-Serve [707], and DistServe [708] restructure computation to improve throughput and latency, while TriForce [709], Medusa [710] MagicDec [711] accelerate generation via speculative decoding and structural compression. Parallel frameworks such as DeepSpeed-FastGen [712] and SpecExec [713] further boost" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "24" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.314, + 0.068 + ], + "angle": 0, + "content": "efficiency with minimal overhead." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.068, + 0.491, + 0.171 + ], + "angle": 0, + "content": "Runtime alignment methods [714] adapt model behavior through cross-model guidance or token-level reward modeling. Systems such as SelfDefend [715] and Gradient Cuff [716] detect unsafe generation by monitoring agreement across models or loss landscapes, while Spotlighting [691] inserts provenance signals to mitigate indirect prompt injection." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.171, + 0.492, + 0.245 + ], + "angle": 0, + "content": "Access isolation is achieved through policy enforcement [717] and system wrappers [688]. At the supply level, tools like MalHug [718] identify poisoned models, while system audits reveal sandbox and plugin vulnerabilities, highlighting the need for end-to-end secure deployment." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.245, + 0.491, + 0.378 + ], + "angle": 0, + "content": "LLM-based guard models utilize lightweight LLMs like Llama Guard [330], Aegis Guard [719, 720], WildGuard [721], and ShieldGemma [722] to moderate both the input and output of the victim LLMs. However, they are purely classifiers. To solve this problem, the first reasoning-based guard model named GuardReasoner [723] is proposed to improve the performance, explainability, and generalization ability via learning to reason. It brings new opportunities for the safety of large-scale reasoning models [724]." 
+ }, + { + "type": "title", + "bbox": [ + 0.074, + 0.393, + 0.428, + 0.408 + ], + "angle": 0, + "content": "6.1.3 Evaluation and Benchmarks in Deployment" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.412, + 0.492, + 0.487 + ], + "angle": 0, + "content": "To assess the reliability and safety of LLMs after deployment, evaluation efforts focus on several key dimensions and risk types, as illustrated in Figure 10. These dimensions guide the design of systematic benchmarks and metrics tailored for real-world deployment settings." + }, + { + "type": "image", + "bbox": [ + 0.077, + 0.503, + 0.49, + 0.653 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.662, + 0.478, + 0.693 + ], + "angle": 0, + "content": "Fig. 10: The overview of evaluation and benchmarks in single LLM's deployment phase." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.706, + 0.49, + 0.722 + ], + "angle": 0, + "content": "Robustness Evaluation. To systematically assess the relia-" + }, + { + "type": "table_caption", + "bbox": [ + 0.079, + 0.733, + 0.486, + 0.763 + ], + "angle": 0, + "content": "TABLE 7: Summary of LLM robustness benchmarks at the deployment stage." + }, + { + "type": "table", + "bbox": [ + 0.076, + 0.767, + 0.49, + 0.94 + ], + "angle": 0, + "content": "
BenchmarkAdversarialNaturalJailbreakToxicity
JailbreakBench [306]
HarmBench [305]
JAMBench [725]
JailbreakEval [726]
Latent Jailbreak [727]
PromptRobust [728]
SelfPrompt [729]
Chen et al. [730]
Chu et al. [731]
AdvGLUE [732]
AdvGLUE++ [333]
NoiseLLM [733]
NEO-BENCH [734]
CompressionEval [735]
" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.923, + 0.214 + ], + "angle": 0, + "content": "bility of large language models (LLMs) after deployment, we categorize robustness evaluation into two broad types: adversarial robustness and natural robustness. Adversarial robustness focuses on evaluating how LLMs respond to malicious or adversarial inputs, such as jailbreak prompts, prompt injections, or red-teaming attacks. Natural robustness, on the other hand, assesses LLM behavior under nonmalicious but realistic distribution shifts, including typos, paraphrasing, novel word usage, or temporal drift. A summary of representative benchmarks categorized along these 4 dimensions is presented in Table 7." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.214, + 0.923, + 0.695 + ], + "angle": 0, + "content": "Adversarial Robustness: A range of benchmarks and frameworks have been proposed for adversarial robustness. JailbreakBench [306] provides a standardized evaluation suite for jailbreak attacks, containing 100 misuse behaviors and an evolving repository of adversarial prompts. HarmBench [305] proposes a comprehensive red-teaming evaluation framework that includes 510 harmful behaviors spanning diverse semantic and functional categories, supporting both text-only and multimodal inputs across 33 LLMs. JAMBench [725] targets the evaluation of moderation guardrails using 160 carefully constructed prompts across four major risk categories and introduces a cipher-character-based attack. JailbreakEval [726] offers a unified toolkit for jailbreak assessment with string-matching, classifier-based, and LLM-based evaluators. Latent Jailbreak [727] focuses on detecting embedded malicious intent in seemingly benign prompts and evaluates instruction-following robustness using a hierarchical annotation scheme. PromptRobust [728] benchmarks prompt-level robustness with character, word, sentence, and semantic-level perturbations across 13 datasets and 8 NLP tasks. 
SelfPrompt [729] enables autonomous robustness evaluation through knowledge-guided prompt generation and LLM-based self-assessment. Chu et al. [731] conduct a large-scale comparison of 17 jailbreak attacks on 8 LLMs and 160 forbidden prompts, proposing a unified taxonomy and benchmarking various defenses. Chen et al. [730] propose a multi-dimensional framework assessing jailbreak reliability over 13 LLMs and 1,525 prompts, integrating metrics such as attack success rate (ASR), toxicity, fluency, and grammatically. Zhang et al. [736] propose a novel definition and benchmark for LLM's content moderation based on a sensitive-semantic perspective." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.695, + 0.923, + 0.944 + ], + "angle": 0, + "content": "Natural Robustness: Several benchmarks focus on evaluating LLMs under realistic but benign input perturbations or distribution shifts. AdvGLUE [732] and AdvGLUE++ [333] extend the original GLUE benchmark [737] with semantically-preserving perturbations at logic, word, and sentence levels. NoiseLLM [733] presents a unified framework for evaluating slot-filling robustness under character-, word-, and sentence-level noise, including typos and paraphrases. NEO-BENCH [734] assesses robustness to temporal drift by introducing neologisms into tasks such as machine translation, classification, and question answering. CompressionEval [735] provides a prompt-free evaluation framework using lossless compression to assess generalization and robustness, comparing LLM performance on content before and after the model's knowledge cutoff. These benchmarks offer complementary perspectives for assessing LLM performance under both malicious and naturally" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.423, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "25" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.055, + 0.264, + 0.068 + ], + "angle": 0, + "content": "occurring input variations." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.068, + 0.491, + 0.083 + ], + "angle": 0, + "content": "Content Trustfulness and Fairness Evaluation. Beyond ro" + }, + { + "type": "table_caption", + "bbox": [ + 0.088, + 0.093, + 0.478, + 0.124 + ], + "angle": 0, + "content": "TABLE 8: Summary of content trustfulness and fairness evaluation benchmarks for LLMs at deployment stage." + }, + { + "type": "table", + "bbox": [ + 0.076, + 0.127, + 0.493, + 0.264 + ], + "angle": 0, + "content": "
BenchmarkHallucinationFactualityToxicityBiasDiscrimination
HaluEval [738]
Med-HALT [739]
ANAH [740]
SelfCheckGPT [741]
DoLa [742]
Mundler et al. [743]
Elaraby et al. [744]
Ji et al. [745]
Zhang et al. [746]
Guo et al. [747]
RTP-LX [748]
ROBBIE [749]
CEB [750]
" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.273, + 0.492, + 0.389 + ], + "angle": 0, + "content": "bustness, a key dimension of deployment-stage evaluation concerns the trustfulness and fairness of LLM-generated content. This includes detecting and mitigating outputs that are factually incorrect (hallucinations), misleading (low factuality), harmful (toxic), or unfair (biased or discriminatory). We categorize existing benchmarks into five axes: hallucination, factuality, toxicity, bias, and discrimination, and summarize representative works in Table 8." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.39, + 0.493, + 0.622 + ], + "angle": 0, + "content": "Benchmarks in this space target either the accuracy of generated content or its alignment with human values. For hallucination and factuality evaluation, HaluEval [738] and MedHALT [739] provide reference-based hallucination annotations in general and medical domains, respectively, while ANAH [740] delivers fine-grained, human-annotated hallucination labels with correction spans. SelfCheckGPT [741] detects hallucinations via consistency checks across multiple generations, and DoLa [742] proposes a decoding strategy that contrasts internal layer activations to reduce factual errors. Other works such as Mundler et al. [743], Elaraby et al. [744], and Ji et al. [745] leverage taxonomic definitions or internal model signals to quantify or predict hallucination risk. Zhang et al. [746] introduce FEWL, a reference-free evaluation framework that uses agreement across reference LLMs to approximate hallucination likelihood." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.622, + 0.492, + 0.724 + ], + "angle": 0, + "content": "In terms of toxicity detection, Guo et al. [747] show that role-playing prompts (persons) can elicit toxic behavior from ChatGPT, and RTP-LX [748] evaluates multilingual LLMs in detecting culturally sensitive harm. 
Both studies reveal that current LLMs remain vulnerable to subtle toxic or culturally biased outputs, especially in low-resource languages or when confronted with indirect harm." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.724, + 0.492, + 0.841 + ], + "angle": 0, + "content": "For evaluating social bias and discrimination, ROBBIE [749] benchmarks LLMs across 12 demographic axes with template-based prompts and multiple toxicity and regard metrics, covering gender, race, religion, and intersections thereof. CEB [750] proposes a compositional taxonomy for fairness evaluation and introduces multiple new datasets spanning stereotyping, toxicity, and classification bias, supporting both direct and indirect evaluation modes." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.841, + 0.492, + 0.928 + ], + "angle": 0, + "content": "These benchmarks collectively provide a multidimensional view of content trustfulness and fairness, enabling the systematic evaluation of LLMs beyond syntactic correctness or surface fluency. As safety-critical deployment scenarios become increasingly prevalent, such evaluation tools play a central role in ensuring the responsible use of LLMs." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.928, + 0.492, + 0.943 + ], + "angle": 0, + "content": "Data Privacy and Leakage Evaluation. Data privacy is" + }, + { + "type": "table_caption", + "bbox": [ + 0.512, + 0.049, + 0.918, + 0.08 + ], + "angle": 0, + "content": "TABLE 9: Summary of privacy evaluation benchmarks for LLMs at the deployment stage." + }, + { + "type": "table", + "bbox": [ + 0.521, + 0.093, + 0.905, + 0.235 + ], + "angle": 0, + "content": "
BenchmarkPIIMIAEIACompliance
PrivLM-Bench [751]
LLM-PBE [752]
PrivAuditor [753]
Rossi et al. [754]
Whispered Tuning [755]
ProPILE [103]
PrivaCI-Bench [756]
Commercial Audit [757]
LessLeak-Bench [758]
SecureSQL [759]
DecodingTrust [333]
" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.258, + 0.923, + 0.346 + ], + "angle": 0, + "content": "a critical dimension in evaluating the trustworthiness of LLMs at deployment. Table 9 summarizes representative benchmarks that assess privacy risks along four axes: personally identifiable information (PII) leakage, membership inference attacks (MIA), embedding inversion attacks (EIA), and regulatory or contextual compliance." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.346, + 0.923, + 0.462 + ], + "angle": 0, + "content": "PrivLM-Bench [751] and LLM-PBE [752] offer comprehensive multi-level evaluations spanning all three major attack types. PrivAuditor [753] and Rossi et al. [754] focus on adaptation-stage vulnerabilities across a variety of finetuning techniques. Whispered Tuning [755] proposes a differential privacy-based training scheme to reduce leakage, while ProPILE [103] tests whether LLMs can reconstruct sensitive information from prompts related to known users." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.462, + 0.923, + 0.594 + ], + "angle": 0, + "content": "PrivaCI-Bench [756] and Commercial Audit [757] emphasize regulatory compliance, evaluating model behavior against privacy expectations and legal frameworks such as GDPR and the EU AI Act. SecureSQL [759] examines leakage in structured query generation, and LessLeak-Bench [758] reveals code-specific leakage across software engineering benchmarks. Finally, DecodingTrust [333] includes privacy as part of a broader trustworthiness suite, auditing GPT models across multiple risk dimensions." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.594, + 0.923, + 0.637 + ], + "angle": 0, + "content": "Together, these benchmarks provide a foundation for assessing LLM privacy risks across diverse modalities, attack surfaces, and deployment scenarios." 
+ }, + { + "type": "text", + "bbox": [ + 0.504, + 0.637, + 0.923, + 0.753 + ], + "angle": 0, + "content": "Multi-modal Safety Evaluations As multimodal large language models (MLLMs) become increasingly integrated into real-world applications, ensuring their safety under diverse input conditions is essential. A growing number of studies have proposed evaluation benchmarks and frameworks to assess MLLM vulnerabilities across multiple dimensions [760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780, 781, 782]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.753, + 0.923, + 0.914 + ], + "angle": 0, + "content": "Jailbreak evaluation has received significant attention, with benchmarks such as MM-SafetyBench [760] and Jailbreakv-28k [761] targeting harmful instruction-following behaviors. MMJ-Bench [762] and Retention Score [763] further extend jailbreak assessment to include visual robustness and long-term safety retention. For hallucination, several works diagnose MLLM failures arising from inconsistencies between visual inputs and generated text, including HallusionBench [764], POPE [765], and Bingo [766]. SIUO [767] complements this direction by evaluating cross-modality consistency under seemingly benign inputs." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.914, + 0.923, + 0.943 + ], + "angle": 0, + "content": "Robustness under adversarial visual corruption is assessed in MVTamperBench [768] and B-AviBench [769]," + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "26" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.053, + 0.493, + 0.169 + ], + "angle": 0, + "content": "which introduce perturbed or misleading visual stimuli to test model stability. 
Meanwhile, fairness and social bias have been evaluated through VIVA [770], GenderBiasVL [771], FACET [772], FairDeDup [773], CounterBias [774], PAIRS [775], DeAR [776], and MMBias [777], covering gender, racial, and intersectional dimensions using parallel image sets, counterfactual probing, and real-world dataset imbalances." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.171, + 0.492, + 0.274 + ], + "angle": 0, + "content": "To unify these evaluation directions, several comprehensive frameworks have emerged. MultiTrust [778] and SPAVL [779] aim to benchmark MLLMs across diverse safety criteria, including robustness, fairness, and harmfulness. Q-Eval-100K [780] complements these efforts by focusing on visual generation quality and alignment under instruction-following settings." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.275, + 0.492, + 0.334 + ], + "angle": 0, + "content": "Collectively, these benchmarks highlight the unique challenges posed by multimodal interactions and the growing need for holistic, scalable safety evaluations tailored to MLLMs." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.364, + 0.262, + 0.38 + ], + "angle": 0, + "content": "6.2 Single-agent Safety" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.388, + 0.493, + 0.593 + ], + "angle": 0, + "content": "In this section, we focus on security issues related to a single agent. We first define an agent as an interactive entity that uses an LLM as the core for reasoning, decision-making, and reflection while integrating memory, tools, and the environment as capability-enhancing components. Beyond the deployment risks associated with the LLM core, we introduce the security issues arising from these three additional modules. Specifically, for tools (Section 6.2.2) and memory (Section 6.2.3), we summarize existing work from both attack (Section 6.2.4) and defense (Section 6.2.5) perspectives to identify technical paradigms. 
For the environment (Section 6.2.6), we explore unique security challenges from the perspective of various agent-interaction settings. We demonstrate an overview of agent safety in Figure 12." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.614, + 0.26, + 0.63 + ], + "angle": 0, + "content": "6.2.1 Definition of Agent" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.636, + 0.495, + 0.945 + ], + "angle": 0, + "content": "LLM-driven agent refers to an AI system capable of operating independently or with limited human oversight, where a sophisticated language model [6, 783, 784, 785] serves as the foundational intelligence for processing inputs, executing tasks, and engaging in interactions. By leveraging advanced natural language understanding and generation, such agents [29, 786, 787, 788, 789] can analyze information, resolve queries, and adapt to user or environmental inputs [790, 791, 792]. To extend their functionality, they frequently incorporate supplementary mechanisms—such as data storage modules [23, 793, 794, 795], external software interfaces [790, 796, 797], or strategic reasoning frameworks [798]—allowing them to transcend basic text production. This adaptability makes them valuable for diverse implementations, including interactive dialogue systems [799], workflow optimization [800, 801, 802, 803], and complex decision-making scenarios [804]. In this study, we focus on deconstructing agent safety into three critical dimensions: tool utilization, memory management, and environment-specific security concerns. We demonstrate the components and structures of agent systems in Figure 11." + }, + { + "type": "title", + "bbox": [ + 0.506, + 0.054, + 0.64, + 0.069 + ], + "angle": 0, + "content": "6.2.2 Tool Safety" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.077, + 0.925, + 0.355 + ], + "angle": 0, + "content": "Some works enable LLM agents to learn how to use tools by generating datasets and fine-tuning the model for API usage [25, 805]. 
Specifically, tools can be implemented in various forms, including but not limited to code-based API functions (e.g., search engine [806] and calculator), embodied intelligence like robotic arms [807], and more. A tool serves as a bidirectional medium: on one hand, it allows the agent to map internal decisions into actions within the interactive environment; on the other hand, it also acts as a means for the agent to collect information from the external world. Given the pivotal role of tools in agent components, the related security issues are worth exploring [74]. For example, in the field of web security, Fang et al. [808, 809] investigate how autonomous agents, when equipped with appropriate tools, can independently compromise websites and exploit one-day vulnerabilities in real-world systems without human intervention. Next, we will summarize and discuss existing research from attack perspectives and figure out the lack of tool invocation defense in current research." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.356, + 0.925, + 0.575 + ], + "angle": 0, + "content": "Attacks. Based on the target of the attack, safety-related attacks involving tools can be categorized into Tool-aided Attacks and Tool-targeted Attacks. The former refers to attackers utilizing agents equipped with tools to execute attacks that LLMs cannot independently assist with, such as leveraging agents with web access and code execution capabilities to facilitate cyberattacks. The latter involves attackers targeting the tool invocation process itself, attempting to manipulate or induce tool selection for malicious purposes through various attack methods. However, from the perspective of the technical stack of attacks, the two can be unified. We have identified new applications of traditional LLM attack methods in tool safety, as well as novel attack paradigms that have emerged due to the unique characteristics of tools." 
+ }, + { + "type": "text", + "bbox": [ + 0.504, + 0.576, + 0.925, + 0.752 + ], + "angle": 0, + "content": "Jailbreak. Similar to jailbreak methods in LLM safety, agent jailbreak also bypasses the agent's built-in safety mechanisms through specific prompts to elicit malicious responses. However, in the agent scenario, the malicious behaviors it aims to induce are different. Specifically, Cheng et al. [810] manually craft jailbreak prompts to extract personal information from the training data of code-generation agents. In contrast, Fu et al. [811] and Imprompter [812] both employ gradient-based optimization like GCG [260] to automatically generate input prompts or images that manipulate agents into leveraging tools for privacy breaches in dialogues or executing harmful actions on user resources." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.753, + 0.926, + 0.945 + ], + "angle": 0, + "content": "Injection. This type of attack can be summarized into two forms of injection: Prompt Injection (similar to LLM safety vulnerabilities) where malicious instructions are embedded in input data, exploiting the difficulty LLMs face in distinguishing between instructions and data. Another form is Tool Injection where malicious tools are injected to enable further exploitation, such as using the tool to execute malicious actions. For example, BreakingAgents [813] utilizes human-crafted prompt injections to execute malfunction attacks, causing agents to engage in repetitive or irrelevant actions, with additional exploration into the propagation of such attacks within Multi-Agent Systems (MAS). ToolCommander [814] is the second type. It proposes a two-stage" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "27" + }, + { + "type": "image", + "bbox": [ + 0.076, + 0.053, + 0.924, + 0.352 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.237, + 0.352, + 0.758, + 0.368 + ], + "angle": 0, + "content": "Fig. 11: The overview of LLM-based single-agent and multi-agent systems." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.393, + 0.491, + 0.451 + ], + "angle": 0, + "content": "attack strategy: first, injecting malicious tools to steal user queries, and subsequently manipulating tool selection using the stolen data, thereby achieving privacy theft and denial-of-service attacks." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.455, + 0.491, + 0.674 + ], + "angle": 0, + "content": "Backdoor. Backdoor attacks also find utility in the context of agent safety, but unlike LLMs, LLM agents develop diverse verbal reasoning traces through continuous environmental interactions, broadening potential backdoor attack vectors. Yang et al. [815] define two types of backdoor attacks, targeting either the final returned results or the intermediate processes of the attacking agent, and implement the above variations of agent backdoor attacks on two typical agent tasks, including web shopping and tool utilization. Furthermore, DemonAgent [816] decomposes a backdoor into multiple sub-backdoor fragments to poison the agent's tools. Beyond intentional guidance, studies such as BadAgent [817] highlight that backdoor attacks can inadvertently prompt agents to misuse tools for malicious purposes." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.677, + 0.49, + 0.822 + ], + "angle": 0, + "content": "Manipulation. This type of attack refers to directly or indirectly manipulating or altering the tool's returned content to leak sensitive information or carry out malicious actions. 
AUTOCMD [818] employs a separate LLM, trained on tool-calling datasets and fine-tuned with target-specific examples, to generate and replicate legitimate commands for extracting sensitive information from tools. Meanwhile, Zhao et al. [819] manipulate third-party API outputs by injecting malicious content or omitting critical information, ultimately causing erroneous or biased system behaviors." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.826, + 0.492, + 0.945 + ], + "angle": 0, + "content": "Defenses. Compared to attacks on agent tools, defense mechanisms for secure tool invocation have been less studied. Specifically, AgentGuard [820] employs LLM orchestrators to automatically detect unsafe tool-use workflows and produce safety constraints for secure tool utilization. PrivacyAsst [821] proposes an encryption-based solution by integrating an encryption scheme into the tool using LLM agents to safeguard user privacy and align them" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.393, + 0.923, + 0.468 + ], + "angle": 0, + "content": "with computational security standards. In addition, some works enhance the security of agent systems by leveraging tool invocation, GuardAgent [822] pioneers an approach to verify target agents' trustworthiness by executing guardrail code through API calls during task plan implementation." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.482, + 0.667, + 0.497 + ], + "angle": 0, + "content": "6.2.3 Memory Safety" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.5, + 0.923, + 0.676 + ], + "angle": 0, + "content": "The memory mechanism in LLM agents enables them to retain historical behaviors, thereby enhancing future decision-making capabilities. Typically, agent memory can be categorized into long-term and short-term memory systems. 
The long-term memory module commonly employs Retrieval-Augmented Generation (RAG) [823, 824] technology to facilitate precise information retrieval, while the short-term memory stores real-time data to support immediate conversational contexts and task execution. While these memory modules significantly improve agent functionality, they simultaneously introduce potential security vulnerabilities, making the system susceptible to malicious attacks." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.69, + 0.606, + 0.704 + ], + "angle": 0, + "content": "6.2.4 Attack" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.709, + 0.922, + 0.753 + ], + "angle": 0, + "content": "Follow the trustworthy issues in [74], we categorize attacks related to memory into three types: Memory Poisoning, Privacy Leakage, and Memory Misuse." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.753, + 0.923, + 0.943 + ], + "angle": 0, + "content": "(I) Memory Poisoning refers to adversarial attacks where malicious data is injected into an agent's long-term memory [313, 825, 826, 827, 828, 829]. When the agent retrieves and utilizes such corrupted memory, it may produce erroneous outputs, misleading responses, or even hazardous actions. For example, PoisonedRAG framework [827] employs a dual optimization approach, simultaneously manipulating both the retrieval and generation pipelines to systematically poison the agent's memory system. AgentPoison [826] introduces an advanced backdoor attack methodology that optimizes trigger patterns and seamlessly integrates them into query formulations, significantly elevating the likelihood of malicious sample retrieval" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "28" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.061, + 0.907, + 0.372 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.274, + 0.378, + 0.723, + 0.394 + ], + "angle": 0, + "content": "Fig. 12: The overview of the safety of LLM-based agent systems." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.42, + 0.492, + 0.581 + ], + "angle": 0, + "content": "while maintaining stealth. (II) Privacy Leakage occurs when attackers exploit the interface between an agent and its long-term memory to extract stored sensitive data [520, 605, 607, 830, 831]. Such breaches may expose user information to malicious third parties, posing significant real-world risks. (II) Memory Misuse refers to the deliberate construction of multi-turn query sequences that systematically circumvent safety protocols by exploiting the retention properties of agent short-term memory [752, 832, 833, 834, 835, 836]. This attack vector enables progressive erosion of defensive measures through iterative interaction patterns." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.591, + 0.188, + 0.605 + ], + "angle": 0, + "content": "6.2.5 Defense" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.608, + 0.491, + 0.77 + ], + "angle": 0, + "content": "To counter these attacks, various defense approaches have been developed to enhance the robustness of memory systems [520, 835, 837, 838, 839]. (I) Detection Detection mechanisms primarily focus on identifying and eliminating malicious content retrieved from long-term memory systems [835, 838, 839?]. (II) Prompt Modification involves strategically rewriting user queries before processing by the agent to enhance response safety [520, 835]. 
(III) Output Intervention involves real-time monitoring and modification of agent responses prior to delivery to ensure safety and accuracy [825, 840]." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.78, + 0.267, + 0.795 + ], + "angle": 0, + "content": "6.2.6 Environment Safety" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.797, + 0.491, + 0.943 + ], + "angle": 0, + "content": "Agents operate within dynamic and heterogeneous environments, spanning physical and digital domains [841, 842, 843]. Their interaction with these environments is a multistep process [844, 845]. First, agents engage in perception, gathering data from sources like sensors in a physical setup or digital platforms [806]. This perceived data is then analyzed using various algorithms and reasoning mechanisms to identify patterns and potential actions [846]. Based on this analysis, agents take action, which can either directly influence the environment, like an autonomous vehicle making" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.42, + 0.922, + 0.451 + ], + "angle": 0, + "content": "a lane change [847], or modify their own internal state, such as a software agent updating its knowledge base [848]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.453, + 0.923, + 0.629 + ], + "angle": 0, + "content": "However, this interaction is plagued by trustworthiness challenges. There are security risks in every process of interaction with the environment [849]. Agent roles and environmental constraints contribute to risks such as autonomous driving errors [850] and network disruptions [806, 851]. Given the diverse dynamic scenarios and related issues [849, 852, 853], the existing solutions are fragmented and lack a systematic framework. 
Thus, we will explore trustworthiness and security aspects by categorizing relevant papers according to whether they focus on ensuring safety in the perception, analysis, or action phase of the agent-environment interaction, as illustrated in Figure 10." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.632, + 0.923, + 0.881 + ], + "angle": 0, + "content": "Perception. The perception phase serves as the foundational layer of agent-environment interaction, where agents acquire raw data to interpret their surroundings. However, this phase is inherently vulnerable to risks such as data poisoning, environmental noise, and biased observations. Hudson [841] converts real-time sensory inputs into natural language representations augmented with security validation protocols, employing causal analysis techniques to improve reliability during adversarial perception scenarios. ChatScene [847] develops safety-oriented simulation environments for autonomous systems by converting linguistic commands into executable code compatible with CARLA's simulation architecture. Chen et al. [854] systematically categorize perceptual vulnerabilities in financial AI systems, identifying three primary risk categories: synthetic data generation errors, temporal inconsistency challenges, and susceptibility to engineered input manipulations." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.884, + 0.924, + 0.943 + ], + "angle": 0, + "content": "Reasoning. The reasoning phase transforms raw perceptual data into actionable insights through decision-making models, and knowledge-based inference. This stage is critical to ensure agents act appropriately in dynamic environments," + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "29" + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.057, + 0.489, + 0.205 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.213, + 0.454, + 0.243 + ], + "angle": 0, + "content": "Fig. 13: The overview of agent and environment interactions." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.271, + 0.493, + 0.476 + ], + "angle": 0, + "content": "but introduces unique trustworthiness challenges. Yang et al. [846] develop a temporal safety verification framework using formal logic systems, implementing dual mechanisms for auditing the compliance of safety protocols and filtration of hazardous decisions to meet the requirements of industrial robotics. Agents4PLC [855] establishes an industrial control programming framework that combines automated code synthesis with formal verification processes, integrating RAG [235] and COT [343] to ensure operational integrity. Xiang et al. [822] propose medical AI systems that employ semantic reasoning engines for confidential data protection. Park et al. [845] demonstrate improved threat detection capabilities through simulated organizational communication patterns in anomaly identification systems." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.477, + 0.493, + 0.725 + ], + "angle": 0, + "content": "Action. The action phase represents the culmination of agent-environment interaction, where agents execute decisions to influence their surroundings or update internal states. Trustworthiness at this stage hinges on ensuring that actions are safe, precise, and aligned with intended objectives. Fang et al. [851] reveal the capacity of autonomous systems to exploit digital infrastructure weaknesses through adaptive penetration testing, prompting the development of specialized evaluation frameworks for web agents. 
Furthermore, researchers develop frameworks to evaluate the truthfulness of web agents. Polaris [856] implements distributed AI architectures to enhance fault tolerance and response accuracy of healthcare interaction systems. La et al. [857] employ linguistic evolution models to simulate adaptive content generation patterns that circumvent automated moderation systems, providing insights for regulatory mechanism improvements." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.747, + 0.251, + 0.763 + ], + "angle": 0, + "content": "6.3 Multi-agent Safety" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.768, + 0.491, + 0.943 + ], + "angle": 0, + "content": "In the previous section, we explored security issues in a single agent setting and this section expands the discussion to multi-agent systems (MAS) [58, 71, 858, 859, 860, 861]. Since a single agent has limited problem-solving capabilities and a relatively narrow perspective, it struggles to conduct a comprehensive analysis of complex problems. In contrast, in MAS, agents can interact through various mechanisms, such as cooperation, competition, and debate, enabling them to solve complex problems more efficiently and effectively [862]. However, these interactions also introduce more complex and diverse security challenges [863]. Consequently, compared to single-agent systems, MASs face more severe" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.923, + 0.099 + ], + "angle": 0, + "content": "and intricate security risks [864]. Similarly, we summarize and discuss existing research from both attack and defense perspectives." 
+ }, + { + "type": "title", + "bbox": [ + 0.506, + 0.109, + 0.606, + 0.123 + ], + "angle": 0, + "content": "6.3.1 Attack" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.127, + 0.924, + 0.375 + ], + "angle": 0, + "content": "In MAS, security threats primarily stem from the propagation of harmful information, hallucinations, and biases through agent interactions, as well as the coordinated planning and optimization of attacks to target security agents within the system. These threats can arise spontaneously through the unintended amplification of misinformation or be deliberately orchestrated by malicious agents. Attack strategies in MAS often integrate multiple traditional techniques, such as prompt injection, jailbreak, and adversarial attacks, while also exploiting emergent properties of agent communication and collaboration. This multi-faceted nature makes MAS attacks more covert, adaptive, and challenging to detect and mitigate. Moreover, the dynamic and autonomous nature of agents allows adversaries to refine their attacks in real-time, further complicating defense mechanisms. Below, we summarize the key research related to these threats." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.375, + 0.924, + 0.609 + ], + "angle": 0, + "content": "Transmissive Attack. It spreads within the MAS like a virus, propagating dangerous and harmful information, including covert malicious content, continuously attacking and compromising the agents in the system. Agent Smith [829] uses adversarial attack techniques, harmful images are generated—appearing benign on the surface but embedding malicious information. These images propagate within the MAS, causing agents to be compromised and posing significant security risks. CORBA [865] introduces Contagious Recursive Blocking Attacks, which exhibit transmissibility across any topological network and can continuously drain computational resources. Lee et al. 
[600] introduce Prompt Infection in MAS, including data theft, scams, misinformation, and system-wide disruption, which spreads silently. Similarly, Tan et al. [866] use multimodal malicious prompts to infect other secure agents, compromising their security." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.609, + 0.924, + 0.84 + ], + "angle": 0, + "content": "Interference Attack. This attack focuses on how it interferes with and disrupts interactions within the MAS, emphasizing communication disruption and misinformation, which affect information transmission within the MAS and lead to a decline in its defensive capability. NetSafe [867] conducts extensive experiments, analyzing and revealing their structural dependencies and adversarial impacts. At the same time, Huang et al. [868] study how the resilience of MAS varies between different downstream tasks, system structures, and error types; Agent-in-the-Middle [869] manipulates and intercepts information in agent interactions through intermediary agents, disrupting the communication mechanism. The experiment validates the harm caused by the interruption of interactions by intermediary agents through a comparison of MAS with different topological structures." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.841, + 0.923, + 0.943 + ], + "angle": 0, + "content": "Strategic Attack. Strategic attack involves collaboration between agents and strategic optimization of attack methods, aiming to emphasize the cooperation and long-term impact of the attack, making it increasingly dangerous and more destructive. Evil Geniuses [870] modifies system roles, where these roles collaborate to generate malicious prompts. By simulating adversarial attacks and defenses," + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.423, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "30" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.054, + 0.493, + 0.215 + ], + "angle": 0, + "content": "they optimize and evaluate each round of attack behavior, making the attacks increasingly dangerous to target other agents. Amayuelas et al. [871] use adversarial attack techniques to enable harmful agents in the multi-agent system to collaborate in debates to persuade other secure agents. These malicious agents may exploit superior knowledge, larger model sizes, or greater persuasion power to gain an unfair advantage. Ju et al. [872] form a multi-agent community using a two-stage attack method: persuasive injection and knowledge manipulation injection, to induce agents to spread counterfactual and harmful knowledge." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.226, + 0.188, + 0.24 + ], + "angle": 0, + "content": "6.3.2 Defense" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.244, + 0.491, + 0.388 + ], + "angle": 0, + "content": "In response to the various attack methods mentioned above in multi-agent systems, many effective defense strategies have emerged that can be applied to MAS. Currently, many studies focus on forming agent groups to collaborate in joint defense and designing specific defense mechanisms, such as multi-round or multi-layer checks and filtering, to ensure the safety of the responses output by the MAS. Alternatively, defense can be achieved by identifying harmful agents through the propagation of malicious information and eliminating malicious sources." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.389, + 0.492, + 0.606 + ], + "angle": 0, + "content": "Adversarial Defense. This type of defense focuses on attack-defense confrontation, leveraging this adversarial mechanism to develop more effective defense methods or mechanisms to enhance the security of the MAS. 
LLAMOS [873] employs adversarial defense techniques, where defensive agents and attacking agents engage in counterinteractions, with neither fully defeating the other, thereby enhancing the robustness of the defense and improving the MAS's overall defensive capability. AutoDefense [874] proposes that agents collaborate to complete defense tasks through adversarial prompt filtering, primarily focusing on filtering harmful prompt information from LLMs. In addition to using adversarial techniques for defense, defense can also be achieved by forming a multi-agent group to engage in debates." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.608, + 0.493, + 0.884 + ], + "angle": 0, + "content": "Consensus Defense. To better leverage the advantages of MAS, Consensus Defense utilizes agent collaboration and consensus building for defense, employing voting, debates, and evidence-based reasoning mechanisms to establish a defense system and enhance the security of the MAS. Chern et al. [875] propose that toxicity can be reduced through multi-agent debates, and the widespread use of multi-agent interactions can lead to marginal improvements. Similarly, BlockAgent [876] proposes a Proof-of-Thought consensus mechanism that combines stake-based miner designation with multi-round debate-style voting, enabling BlockAgents to facilitate multi-agent collaboration through a structured workflow. Audit-LLM [877] proposes a pair-wise Evidence-based Multi-agent Debate mechanism, designed to defend against hallucinations by forming a MAS to detect internal threats. This approach is divided into three components: task decomposition, tool construction, and the final execution of the MAS, ultimately reaching consensus through reasoning." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.885, + 0.493, + 0.945 + ], + "angle": 0, + "content": "Structural Defense. 
Structural Defense treats the MAS as a network structure for planning defense methods, using graph analysis techniques to detect anomalies and resist attacks while incorporating knowledge from other domains" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.925, + 0.141 + ], + "angle": 0, + "content": "to enrich defense strategies in MAS. G-Safeguard [878] compares agents in MAS with various topological structures to nodes in a graph, using Graph Neural Networks (GNN) [879, 880] to detect anomalies in the agents' dialogue graphs and counter adversarial attacks and misinformation within the MAS." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.155, + 0.766, + 0.17 + ], + "angle": 0, + "content": "6.4 Agent Communication Safety" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.173, + 0.925, + 0.525 + ], + "angle": 0, + "content": "As Large Language Model (LLM)-based Agents evolve from isolated entities into interconnected MAS, the mechanisms governing communication between Agents, and their interactions with external environments and tools, have become increasingly critical. Agents exchange information and collaborate through message passing, tool invocation, and environmental interactions; these mechanisms, while essential to system functionality, also expose significant attack surfaces. Early methods [881, 882, 883, 884, 885, 886, 887] of Agent interaction often relied on ad-hoc approaches, such as shared memory [888], API calls [889] or unstructured function calls [890], leading to fragmented systems lacking unified security considerations. To address this challenge and enhance interoperability, standardized communication protocols have emerged. 
Examples include Anthropic's Model Context Protocol (MCP) [891] for Agent-tool interactions, Google's Agent2Agent (A2A) [892] for enterprise-level Agent collaboration, and the Agent Network Protocol (ANP) [893] for open network interoperability, along with other commonly used protocols [894, 895, 896, 897, 898, 899, 900, 901, 902, 903, 904]. However, the open design and dynamic nature of these communication mechanisms, coupled with the autonomy of the Agent, has exposed new vulnerabilities while enhancing functionality." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.532, + 0.606, + 0.546 + ], + "angle": 0, + "content": "6.4.1 Attack" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.549, + 0.925, + 0.695 + ], + "angle": 0, + "content": "The interconnected nature of MAS, facilitated by numerous communication channels, creates a multifaceted attack surface. While individual Large Language Models (LLMs) possess inherent vulnerabilities, the interactions and communications among Agents introduce novel threats that exploit the system's collaborative dynamics. These threats target various components, including communication channels, content interpretation, and underlying protocols, with examples such as Shadowing Attacks, Naming Attacks, Context Poisoning, and Rug Pulls." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.695, + 0.925, + 0.943 + ], + "angle": 0, + "content": "Attacks Communication Channels. These attacks directly disrupt the transmission and routing of messages in the system, affecting both inter-Agent communications and interactions with external endpoints. For instance, Agent-in-the-Middle (AiTM) attacks [869] specifically target the core communication mechanisms of LLM-MAS. By intercepting and manipulating messages between Agents, these attacks can cause Agents to perform unintended actions, thereby compromising the entire system. 
Such attacks underscore the critical security vulnerabilities arising from the communication-dependent nature of Agent collaboration. Furthermore, attacks targeting communication channels and transmission processes, such as communication perturbation [905], involve adversaries injecting noise into messages in transit [906] or masquerading as legitimate sources [907], thereby compromising both the efficiency and security of Agent collaboration." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.921, + 0.043 + ], + "angle": 0, + "content": "31" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.493, + 0.213 + ], + "angle": 0, + "content": "Attacks Content. These attacks target the content of messages themselves, leveraging the mechanisms by which Agents process and interpret received information. For example, Prompt Injection involves embedding malicious instructions into data or content that Agents retrieve or receive through communication channels, thereby manipulating the Agent's behavior or decision-making processes. This technique is discussed in several works, such as [600] and [543]. Additionally, [908] explores indirect Prompt Injection within tool-based scenarios, highlighting the varied strategies employed in complex environments." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.215, + 0.493, + 0.404 + ], + "angle": 0, + "content": "Attacks Exploiting Multi-Agent Dynamics. These attacks leverage the interconnected structure, interaction patterns, or collective behavior of communication-driven Multi-Agent Systems (MAS) to amplify their impact or achieve strategic objectives. Contagious attacks (propagation) initiate malicious behavior on a single agent and spread it across the entire network via inter-agent communication [829, 865]. 
Additionally, malicious agents can coordinate through collective communication to achieve harmful goals, such as replicating malicious instructions across the network by sending replication code or commands, thereby leading to the sharing of legitimate communication keys or identity information with other malicious entities [909]." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.416, + 0.188, + 0.429 + ], + "angle": 0, + "content": "6.4.2 Defense" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.433, + 0.491, + 0.564 + ], + "angle": 0, + "content": "To tackle threats to Agent communication, research proposes a multi-layered defense strategy addressing key points across the communication pipeline, from infrastructure to Agent-level processing. These defenses aim to prevent, detect, or mitigate attacks on channels, content, infrastructure, dynamics, and environmental factors. The strategies integrate into infrastructure and protocol design, individual Agents' message processing, and the collaborative and learning mechanisms of the MAS." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.565, + 0.493, + 0.797 + ], + "angle": 0, + "content": "Protocol Defenses. Protecting the foundation of Agent communication. This includes adopting standardized protocols with built-in security features (encryption, integrity checks, authentication) To counter Agent communication threats, research proposes multi-layered defense strategies targeting different points in the communication pipeline, from the underlying infrastructure to Agent-level message processing. Effective defenses aim to prevent, detect, or mitigate attacks on communication channels, content, infrastructure, such as MCP [891], A2A [892], ANP [893] standards. Establishing managed registries and identity systems for Agent and Tool/Service registration and identity management. Enforcing strong Agent identity verification and access control policies, including JIT credential provisioning. 
Implementing mechanisms to enforce communication dynamics, and environmental impacts." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.798, + 0.492, + 0.945 + ], + "angle": 0, + "content": "Content Defense. These defenses operate at the agent level, focusing on how agents process received messages and content. This includes input modification and filtering, which preprocess incoming content to neutralize adversarial elements. Agents also employ active defense mechanisms, such as reliability estimation, to assess the trustworthiness of messages based on local context, thereby mitigating the impact of untrusted information. For example, [910] proposed an active defense strategy that utilizes a reliability estimator to judge the credibility of received messages and" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.924, + 0.098 + ], + "angle": 0, + "content": "employs a decomposable message aggregation policy network to reduce the influence of unreliable messages on the final decision." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.12, + 0.727, + 0.136 + ], + "angle": 0, + "content": "6.5 Agent Safety Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.14, + 0.924, + 0.273 + ], + "angle": 0, + "content": "Currently, there is already a substantial body of work evaluating the performance of LLM-based agent systems on different tasks [911, 912, 913, 914, 915]. In this section, we focus on benchmarks designed to assess the security of agents. Broadly speaking, these benchmarks include those that construct datasets and those that use other agents to set up sandbox environments for evaluation, each with distinct assessment priorities and specific scenarios for agent security [314, 916, 917, 918, 919]." + }, + { + "type": "table_caption", + "bbox": [ + 0.574, + 0.285, + 0.855, + 0.3 + ], + "angle": 0, + "content": "TABLE 10: Benchmarks for agent safety." 
+ }, + { + "type": "table", + "bbox": [ + 0.508, + 0.304, + 0.924, + 0.511 + ], + "angle": 0, + "content": "
BenchmarkDynamicLLM as EvaluatorEvaluation Focus
InjectAgent [920]Prompt Injection
AgentDojo [849]Prompt Injection
AgentBackdoorEval [816]Backdoor
RiskAwareBench [921]Embodied Agent
RedCode [916]Coding Agent
S-Eval [917]General
Bells [918]General
AgentSafetyBench [922]General
AgentSecurityBench [?]General
AgentHarm [923]General
R-Judge [314]General
ToolSword [924]Tool
PrivacyLens [919]Privacy
ToolEmu [925]Tool
HAIEcosystem [926]General
SafeAgentBench [927]General
JailJudge [928]Jailbreak
" + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.538, + 0.757, + 0.553 + ], + "angle": 0, + "content": "6.5.1 Attack-Specific Benchmarks" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.557, + 0.924, + 0.865 + ], + "angle": 0, + "content": "This type of benchmark focuses on testing the security of an agent when facing specific types of attacks, such as Prompt Injection [600, 929], Backdoor [817, 930, 931], and Jailbreak [874, 932]. Specifically, InjectAgent [920] evaluates LLM agents' vulnerability to indirect prompt injection attacks, measuring behavior safety when tool-integrated agents process malicious instructions embedded in external content, with hacking prompts as an enhancement. A similar work is AgentDojo [849], a dynamic, extensible evaluation framework for assessing prompt injection attacks and defenses in LLM agents by simulating realistic tasks (e.g., email management, banking) with stateful environments and multi-tool interactions under adversarial conditions. As for backdoor attacks, AgentBackdoorEval [816] includes five real-world domains (including Banking-Finance, Medical, and Social Media) with automatically generated prompts, simulated tools, and tailored backdoor triggers to assess attack stealth and effectiveness. Besides, JailJudge [928] introduces a comprehensive jailbreak evaluation benchmark featuring a voting JailJudge MultiAgent, a comprehensive JailJudgeTrain dataset, and a trained Jailjudge Guard." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.879, + 0.765, + 0.894 + ], + "angle": 0, + "content": "6.5.2 Module-Specific Benchmarks" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.899, + 0.924, + 0.945 + ], + "angle": 0, + "content": "Currently, these benchmarks for evaluating the security of a specific module in an agent focus on the invocation of tools [933, 934, 935, 936]. 
For example, ToolSowrd [924] evaluates" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "32" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.493, + 0.158 + ], + "angle": 0, + "content": "LLM safety in tool learning across three stages (input, execution, output) by designing six adversarial scenarios (e.g., malicious queries, noisy tool misdirection, harmful feedback). ToolEmu [925] employs an LM-emulated sandbox to simulate diverse high-stakes tool executions and scenarios, leveraging GPT-4 for both tool emulation and automatic safety/helpfulness evaluations." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.172, + 0.277, + 0.187 + ], + "angle": 0, + "content": "6.5.3 General Benchmarks" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.192, + 0.493, + 0.644 + ], + "angle": 0, + "content": "In addition to the previously mentioned benchmarks that focus on a specific aspect of agent security, some efforts have developed more comprehensive and holistic evaluation frameworks, taking into account diverse scenarios, different agents, and various offensive and defensive techniques. For instance, AgentSafetyBench [922] assesses LLM agent safety through 2,000 test cases across 349 interactive environments, covering 8 risk categories (e.g., data leaks, physical harm) and 10 failure modes (e.g., incorrect tool calls, risk unawareness), with automated scoring via a fine-tuned model. Similarly, AgentSecurityBench [?] is a comprehensive framework that formalizes and evaluates attacks (e.g., Direct/Indirect Prompt Injection, Memory Poisoning) and defenses across 10 scenarios, 10 agents, and 13 LLM backbones, using 7 evaluation metrics. 
SafeAgentBench [927] evaluates embodied LLM agents' safety awareness with 750 diverse tasks (detailed, abstract, long-horizon) in SafeAgentEnv simulation environment, leveraging GPT-4 for task generation and dual evaluators (execution-based and semantic). HAIEcosystem [926] evaluates safety through multi-turn interactions between human users (benign/malicious) and AI agents across 132 scenarios, using modular sandbox environment and LLM-based dynamic risk measurement. AgentHarm [923] tests agent robustness by evaluating compliance with 110 explicitly malicious multi-step tasks across 11 harm categories, using synthetic tools and fine-grained grading rubrics. Different form previous benchmarks, RiskAwareBench [921] focuses on embodied agents, evaluating physical risk awareness via four modules: safety tip generation, risky scene generation, plan generation, and automated evaluation." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.66, + 0.32, + 0.675 + ], + "angle": 0, + "content": "6.5.4 LLM Deployment Roadmap" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.68, + 0.491, + 0.737 + ], + "angle": 0, + "content": "In the deployment of LLMs under frozen parameters, the security landscape has evolved through a tightly coupled dynamic among attacks, defenses, and evaluation mechanisms." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.738, + 0.492, + 0.943 + ], + "angle": 0, + "content": "Initially, black-box attacks leveraged the generative capabilities of LLMs themselves to optimize adversarial prompts, often without precise alignment to the decision boundaries. In contrast, gradient-guided white-box methods offer greater control but face inherent limitations due to the discrete nature of token spaces resulting in prompts with weakened semantic fidelity. These attack trends have catalyzed the emergence of prompt-level defense strategies. 
To counter black-box attacks, recent defenses adopt prompt shaping and system-level constraints to guide and restrict the model's response behavior. For gradient-based attacks, defenses typically apply perplexity-based detection and semantic consistency checks to identify suspicious or adversarial outputs." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.923, + 0.214 + ], + "angle": 0, + "content": "The growing sophistication of defenses reshaped the requirements for evaluation. Static, one-shot rejection mechanisms have proven insufficient in multi-task and multimodal deployments, prompting the development of dynamic strategies such as response rewriting, hierarchical permission control, and consensus-based filtering across multiple models. These strategies demand richer evaluation protocols beyond single metric assessments, shifting toward behavior metrics that capture cross-input consistency, risk under specific task conditions, and adaptability to strategy switching." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.214, + 0.923, + 0.362 + ], + "angle": 0, + "content": "As the attack-defense interaction intensifies, the evaluation itself has become a critical driver of system evolution. Recent frameworks have introduced automated red teaming pipelines, enabling a closed-loop process where jailbreak samples are continually generated, tested against deployed defenses, and fed back to guide both adversarial strategies and defense refinement. This has laid the groundwork for a new paradigm in LLM security research: one where attack, defense, and evaluation are no longer treated in isolation but co-evolve as an interdependent, self-reinforcing system." 
+ }, + { + "type": "title", + "bbox": [ + 0.505, + 0.371, + 0.765, + 0.386 + ], + "angle": 0, + "content": "6.5.5 LLM Deployment Perspective" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.389, + 0.923, + 0.579 + ], + "angle": 0, + "content": "(1) Attack strategies will become more structured and semantically aligned. (i) Black-box attacks may evolve through agent-based optimization, enabling sentence-level jailbreaks with clearer intent and higher success rates. (ii) To overcome the limitations of token-level gradient attacks, future work may focus on generating semantically consistent adversarial prompts that are less detectable by perplexity-based defenses. (iii) Open-source models will serve as surrogates for closed models, allowing attackers to replicate decision boundaries before launching white-box attacks. (iv) Variants from fine-tuning pipelines may leak private information through cross-model comparison, introducing version-aware privacy risks." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.579, + 0.923, + 0.693 + ], + "angle": 0, + "content": "(2) Defenses will shift toward adaptive and transferable mechanisms. (i) Prompt-based defenses will evolve into context-aware controllers that adjust behavior based on input semantics and task context. (ii) Generalizable defenses that work across domains and languages will be critical for scalable deployment. (iii) Future systems may support online updates, enabling continuous refinement in response to new threats." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.695, + 0.923, + 0.827 + ], + "angle": 0, + "content": "(3) Evaluation will act as both a diagnostic and driving force. (i) Benchmarks must expand beyond text to cover multimodal inputs and tool-based actions. (ii) Multi-objective evaluation will replace single-metric scoring, balancing safety and utility through trade-off analysis. (iii) Static test sets will give way to adaptive, streaming benchmarks that evolve with attack trends. 
(iv) Automated red teaming will close the loop, enabling real-time attack generation, evaluation, and defense adjustment." + }, + { + "type": "list", + "bbox": [ + 0.504, + 0.579, + 0.923, + 0.827 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.838, + 0.673, + 0.853 + ], + "angle": 0, + "content": "6.5.6 Agent Roadmap" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.855, + 0.924, + 0.945 + ], + "angle": 0, + "content": "Agent. The evolution of LLM-based agents originated from role-playing paradigms [801, 937, 938, 939], where researchers investigated organizational structures, role allocation mechanisms, and implementation workflows for task-oriented agents in various social contexts. These systematic explorations not only demonstrated agents' potential in" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "33" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.054, + 0.493, + 0.213 + ], + "angle": 0, + "content": "addressing human societal challenges but also spawned interdisciplinary research programs spanning sociology, organizational theory, and psychology. As the field advanced, research focus shifted toward automated agent workflows [795, 860, 940, 941], domain-specific methods for embodied intelligence, and the development of agent capabilities in tool utilization and memory management. Through this progression, agent systems have emerged as a transformative paradigm for automating human social processes, gaining significant recognition as a viable solution for complex societal automation." 
+ }, + { + "type": "text", + "bbox": [ + 0.073, + 0.214, + 0.493, + 0.434 + ], + "angle": 0, + "content": "The rapid advancement of agent capabilities and architectures has brought safety concerns to the forefront of academic and industrial research. These challenges span multiple critical dimensions: tool safety, memory security, and the agent's fundamental operational integrity. Inheriting both the capabilities and vulnerabilities of their underlying LLM foundations, agents intrinsically carry these \"genetic\" weaknesses into more complex operational environments. This inheritance makes safety vulnerabilities particularly acute in agent systems, especially when handling sensitive real-world applications involving personal privacy and financial assets. The development of agent technologies has thus become inextricably linked with safety considerations. Recent years (\\(\\sim\\)2023- until now) have witnessed accelerated research in agent safety, focusing on four key frontiers:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.437, + 0.49, + 0.464 + ], + "angle": 0, + "content": "- Agent Brain Security: The core decision-making mechanisms." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.466, + 0.49, + 0.495 + ], + "angle": 0, + "content": "- Tool Invocation Safety: Secure external API and tool usage." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.495, + 0.49, + 0.525 + ], + "angle": 0, + "content": "- Memory Retrieval Protection: Robustness against memory poisoning." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.525, + 0.49, + 0.552 + ], + "angle": 0, + "content": "- Communication Protocol Security: Safe multi-agent interactions." 
+ }, + { + "type": "list", + "bbox": [ + 0.091, + 0.437, + 0.49, + 0.552 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.557, + 0.492, + 0.601 + ], + "angle": 0, + "content": "Emerging work has also begun addressing safety challenges in embodied agent scenarios, marking an important expansion of the research domain." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.617, + 0.212, + 0.632 + ], + "angle": 0, + "content": "6.5.7 Perspective" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.636, + 0.49, + 0.665 + ], + "angle": 0, + "content": "We outline potential future research directions for agent systems and analyze their developmental trajectory:" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.666, + 0.493, + 0.812 + ], + "angle": 0, + "content": "(1) Safety of External Agent Modules. Unlike standalone LLMs, agents interact with external modules (e.g., tools, memory), which are exposed to open environments and thus more vulnerable to attacks. Key research challenges include: (i) Tool Safety: Secure tool invocation and API usage to prevent adversarial exploitation. (ii) Memory Protection: Robustness against memory poisoning and unauthorized access, to name just a few. These external interfaces introduce unique attack surfaces, making their security a critical research priority." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.812, + 0.493, + 0.945 + ], + "angle": 0, + "content": "(2) Stability and Reliability of Dynamically Updated Agents via Reinforcement Learning: As reinforcement learning (RL) [35, 942, 943] techniques become increasingly integrated with LLM-based agents, these systems are being deployed in more complex and dynamic environments. 
While this integration enhances agents' adaptability and intelligence, it also introduces significant risks: (i) Emergent Threats: Advanced RL capabilities may inadvertently enable agents to learn and propagate harmful behaviors or danger-" + }, + { + "type": "list", + "bbox": [ + 0.072, + 0.666, + 0.493, + 0.945 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.923, + 0.098 + ], + "angle": 0, + "content": "ous information. (ii) Dynamic Vulnerability: Continuous online learning increases exposure to adversarial perturbations or reward hacking." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.098, + 0.923, + 0.214 + ], + "angle": 0, + "content": "Critical Research Directions: (i) Safe RL Frameworks: Developing constrained optimization methods to bound agent behavior within ethical and operational guardrails. (ii) Stability-Aware Updates: Designing update protocols that balance adaptability with robustness (e.g., catastrophic forgetting mitigation). (iii) Anomaly Detection: Real-time monitoring of learning trajectories to identify and neutralize hazardous knowledge acquisition." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.214, + 0.923, + 0.287 + ], + "angle": 0, + "content": "(3) Safety of Embodied Agents in Domain-Specific Scenarios: As autonomous agents become increasingly deployed across specialized domains, their safety considerations must account for unique domain-specific vulnerabilities. 
We list some key challenges as follows:" + }, + { + "type": "text", + "bbox": [ + 0.522, + 0.29, + 0.633, + 0.304 + ], + "angle": 0, + "content": "Web Agents:" + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.306, + 0.922, + 0.321 + ], + "angle": 0, + "content": "- HTML/JS injection risks during automated browsing" + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.321, + 0.922, + 0.349 + ], + "angle": 0, + "content": "- Secure sandboxing requirements for DOM manipulation" + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.35, + 0.922, + 0.38 + ], + "angle": 0, + "content": "- Cross-site scripting (XSS) vulnerabilities in automated form-filling" + }, + { + "type": "list", + "bbox": [ + 0.522, + 0.29, + 0.922, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.382, + 0.715, + 0.396 + ], + "angle": 0, + "content": "- Communication Agents:" + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.398, + 0.922, + 0.428 + ], + "angle": 0, + "content": "- Protocol-level attacks (e.g., SIP flooding, WebRTC exploits)" + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.428, + 0.922, + 0.457 + ], + "angle": 0, + "content": "- End-to-end encryption requirements for sensitive dialogues" + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.457, + 0.864, + 0.472 + ], + "angle": 0, + "content": "- Authentication bypass in voice-based agents" + }, + { + "type": "list", + "bbox": [ + 0.536, + 0.398, + 0.922, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.475, + 0.721, + 0.489 + ], + "angle": 0, + "content": "Robotics Control Agents:" + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.491, + 0.897, + 0.505 + ], + "angle": 0, + "content": "- Physical safety constraints in actuator commands" + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.506, + 0.842, + 0.519 + ], + "angle": 0, + "content": "Real-time collision avoidance verification" + }, + { + "type": "text", + "bbox": [ + 0.536, + 
0.52, + 0.821, + 0.535 + ], + "angle": 0, + "content": "- Emergency stop mechanism reliability" + }, + { + "type": "list", + "bbox": [ + 0.536, + 0.491, + 0.897, + 0.535 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.538, + 0.677, + 0.552 + ], + "angle": 0, + "content": "Healthcare Agents:" + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.554, + 0.842, + 0.569 + ], + "angle": 0, + "content": "Medical decision audit trail requirements" + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.57, + 0.813, + 0.584 + ], + "angle": 0, + "content": "- Drug interaction verification systems" + }, + { + "type": "list", + "bbox": [ + 0.536, + 0.554, + 0.842, + 0.584 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.603, + 0.848, + 0.617 + ], + "angle": 0, + "content": "7 SAFETY IN LLM-BASED APPLICATION" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.622, + 0.923, + 0.812 + ], + "angle": 0, + "content": "In this section, we focus on the security considerations that should be addressed following the commercialization of LLMs into practical applications. With the rapid development of LLMs in fields such as content creation, intelligent interaction, automated programming, medical diagnosis, and financial analysis, LLM-based applications are reshaping industry workflows and business models [944]. However, while LLMs significantly enhance productivity and facilitate human-machine collaboration, their large-scale deployment has also introduced severe security challenges [66]. Ensuring the security, reliability, and compliance of LLM-based applications has become a critical issue in AI research and real-world implementation." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.812, + 0.923, + 0.943 + ], + "angle": 0, + "content": "Truthfulness. 
Despite their powerful text generation capabilities, LLMs exhibit hallucination phenomena, generating inaccurate, misleading, or entirely fictitious content [945, 946, 947, 948, 949]. Unlike traditional errors, hallucinations are often subtle and linguistically plausible, making them especially dangerous in real-world applications. This challenge is exacerbated in high-stakes domains such as healthcare, law, and finance, where misleading AI-generated information can directly affect human safety and economic" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "34" + }, + { + "type": "image", + "bbox": [ + 0.075, + 0.056, + 0.923, + 0.502 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.509, + 0.911, + 0.569 + ], + "angle": 0, + "content": "Fig. 14: We illustrate the diverse applications of AI in enterprise productivity, content generation, programming, healthcare, finance, customer support, education, and cyber-security. We also highlight critical issues related to truthfulness and privacy, including data leakage, security threats, property rights, fairness, and regulatory compliance, underscoring the need for robust safeguards in AI deployment" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.595, + 0.494, + 0.932 + ], + "angle": 0, + "content": "stability. For example, an LLM-powered clinical assistant may suggest nonexistent diseases or cite unverified treatments, posing risks to patients [739, 950], while financial advisors powered by LLMs might generate persuasive but flawed market forecasts, leading to significant capital misallocation or systemic financial vulnerabilities [951]. 
Specifically, hallucination is not merely a surface-level output flaw but a systemic artifact rooted in the model's training dynamics and the nature of its data. Specifically, hallucination can stem from three compounding factors: (1) semantic overgeneralization due to exposure to noisy, unverified, or synthetic pretraining corpora; (2) objective misalignment, where maximum-likelihood or reinforcement-based training prioritizes coherence and helpfulness over factual accuracy; and (3) latent distribution shifts between pretraining and deployment-time inputs, particularly under long-tail or adversarial queries [952, 953]. These factors jointly reinforce spurious correlations and amplify unsupported generations, even in otherwise well-aligned models. In sum, hallucination represents a critical bottleneck for the reliable deployment of LLMs. Its mitigation is foundational not only for improving user trust but also for enabling the safe integration of LLMs into high-stakes decision-making" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.595, + 0.588, + 0.609 + ], + "angle": 0, + "content": "workflows." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.615, + 0.924, + 0.865 + ], + "angle": 0, + "content": "Privacy. Data privacy concerns [954] represent another significant challenge in LLM deployment [821, 955]. Training these models requires vast amounts of text data, which may include personal information, corporate secrets, and medical records [956]. If an LLM inadvertently leaks sensitive training data or lacks robust access control mechanisms, users' private information could be exploited or misused. In corporate settings, LLMs may unintentionally expose confidential documents or sensitive customer data, leading to severe compliance and legal risks. Moreover, inference-time attacks [957], such as membership inference and model extraction, can further expose sensitive data by allowing adversaries to infer training set membership or replicate model behavior. 
Therefore, LLM-based applications must incorporate data protection measures and privacy-preserving techniques like differential privacy and query rate limiting to mitigate information leakage risks." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.869, + 0.923, + 0.945 + ], + "angle": 0, + "content": "Robustness. Prompt injection [543] and jailbreak [636] risks pose additional security threats. Attackers can craft adversarial prompts to bypass security restrictions, causing the model to generate harmful or unauthorized content. For example, in chatbot systems, malicious users could manip" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "35" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.493, + 0.156 + ], + "angle": 0, + "content": "ulate LLMs to generate hate speech, disinformation, or even harmful instructions. Similarly, in AI-powered coding assistants such as GitHub Copilot, attackers may exploit LLMs to produce code with security vulnerabilities, potentially serving as backdoors for future cyberattacks. Developing robust security defenses to prevent LLMs from being misused in real-world applications is crucial for AI safety." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.157, + 0.493, + 0.375 + ], + "angle": 0, + "content": "Copyright. Another pressing concern is intellectual property and copyright protection [958, 959, 960]. LLMs are trained on vast datasets that often include copyrighted texts, source code, and artistic works, raising potential infringement risks. When generating content, LLMs may inadvertently replicate or closely mimic copyrighted material, leading to legal disputes. 
For instance, AI-powered writing tools might generate articles resembling published works, while coding assistants could produce open-source code snippets without proper licensing [961]. This not only raises concerns about content originality but also introduces legal and ethical dilemmas. Addressing these challenges requires watermarking [962, 963], provenance tracking, and clear copyright attribution mechanisms to ensure responsible AI-generated content management [178]." + }, + { + "type": "text", + "bbox": [ + 0.077, + 0.376, + 0.493, + 0.578 + ], + "angle": 0, + "content": "Ethical and Social Responsibility. Beyond technical concerns, ethical and social responsibility are also critical factors in large-scale LLM deployment. Due to biases in training data, LLMs may generate content that reinforces stereotypes, gender discrimination, or racial biases [964, 965]. In sectors such as hiring, finance, and healthcare, biased AI-generated recommendations could exacerbate existing inequalities and lead to unfair decision-making. Moreover, as LLMs become increasingly integrated into virtual assistants, social media, and news distribution platforms, concerns over AI-generated misinformation, transparency, and accountability are growing. Building fair, transparent, and trustworthy AI governance frameworks is thus essential to mitigating AI-induced social risks." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.579, + 0.492, + 0.782 + ], + "angle": 0, + "content": "Governance. As governments worldwide strengthen AI regulations, LLM-related legal and compliance requirements are evolving rapidly. The EU AI Act classifies LLMs as high-risk AI systems, requiring developers to provide transparency reports and risk control mechanisms [966]. China's Generative AI Regulations mandate AI-generated content to align with ethical standards and undergo governmental scrutiny [967]. 
In the United States, regulatory discussions emphasize AI transparency and data privacy protections, urging businesses to establish responsible AI practices [968]. These policy developments indicate that LLM-based applications must comply with regional regulations while maintaining a balance between compliance and innovation." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.783, + 0.493, + 0.945 + ], + "angle": 0, + "content": "In summary, while LLM-based applications drive technological progress, they also introduce multifaceted challenges related to misinformation, data privacy, adversarial manipulation, copyright infringement, ethical concerns, and regulatory compliance (refer to Figure 14). These issues not only impact the trustworthiness and legality of AI technologies but also have far-reaching implications for social trust, legal accountability, and business sustainability. Addressing these challenges necessitates a comprehensive approach that integrates privacy protection, content governance, copyright management, ethical safeguards, and regulatory compli" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.923, + 0.084 + ], + "angle": 0, + "content": "ance, alongside collaborative efforts from both academia and industry." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.105, + 0.833, + 0.12 + ], + "angle": 0, + "content": "8 POTENTIAL RESEARCH DIRECTIONS" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.125, + 0.924, + 0.169 + ], + "angle": 0, + "content": "Through a systematic and comprehensive examination of safety across the entire lifecycle of LLMs, we have identified valuable insights for future research:" + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.171, + 0.923, + 0.245 + ], + "angle": 0, + "content": "* Data generation holds immense potential, particularly in ensuring the safety of generated data and automating the data generation process, which is crucial for reliable and robust model training. 
Reliable data generation is fundamental to the integrity of model training." + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.245, + 0.923, + 0.318 + ], + "angle": 0, + "content": "\\(\\star\\) Post-training phases are becoming increasingly critical. Ensuring secure fine-tuning and alignment of data is a key future direction, closely intertwined with data generation. As concepts proliferate, multi-objective alignment may emerge as a significant area of focus." + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.318, + 0.923, + 0.462 + ], + "angle": 0, + "content": "\\(\\star\\) Model editing and unlearning safety are paramount for efficient model updates and deployment. Current learning efficiencies are suboptimal, and advancements in these technologies could revolutionize how models acquire new knowledge, enabling continuous and efficient learning (potentially even localized memory learning). These techniques might surpass traditional SGD algorithms, but safety measures are essential to prevent models from devolving into malicious entities that contradict human intentions." + }, + { + "type": "text", + "bbox": [ + 0.517, + 0.463, + 0.923, + 0.552 + ], + "angle": 0, + "content": "\\(\\star\\) LLM agents, in the final deployment stage, require robust safety assurances. Ensuring the security of agent tools and agent memory, as well as addressing safety in embodied intelligence scenarios such as web agents and computer agents, are critical areas for further investigation." 
+ }, + { + "type": "list", + "bbox": [ + 0.517, + 0.171, + 0.923, + 0.552 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.573, + 0.649, + 0.587 + ], + "angle": 0, + "content": "9 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.593, + 0.923, + 0.71 + ], + "angle": 0, + "content": "In this survey, we provide a comprehensive analysis of the safety concerns across the entire lifecycle of LLMs, from data preparation and pre-training to post-training, deployment, and commercialization. By introducing the concept of \"fullstack\" safety, we offer an integrated view of the security and safety issues faced by LLMs throughout their development and usage, which addresses gaps in the existing literature that typically focus on specific stages of the lifecycle." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.71, + 0.924, + 0.945 + ], + "angle": 0, + "content": "Through an exhaustive review of over \\(900+\\) papers, we systematically examined and organized the safety issues spanning key stages of LLM production, deployment, and use, including data generation, alignment techniques, model editing, and LLM-based agent systems and LLM-based applications. Our findings highlight the critical vulnerabilities at each stage, such as privacy risks, toxic data, harmful fine-tuning attacks, and deployment challenges. The safety of LLMs is a multifaceted issue requiring careful attention to data integrity, model alignment, and post-deployment security measures. Moreover, we propose promising directions for future research, including improvements in data safety, alignment techniques, and defense mechanisms for LLM-based agents. This work is vital for guiding future efforts to make LLMs safer and more reliable, especially as they become increasingly integral to various industries" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 
14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "36" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.493, + 0.099 + ], + "angle": 0, + "content": "and applications. Ensuring robust security across the entire LLM lifecycle is crucial for their responsible and effective deployment in real-world scenarios." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.13, + 0.19, + 0.145 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.155, + 0.493, + 0.243 + ], + "angle": 0, + "content": "[1] L. Ouyang, J. Wu, X. Jiang, D. Almeida, C. Wainwright, P. Mishkin, C. Zhang, S. Agarwal, K. Slama, A. Ray et al., \"Training language models to follow instructions with human feedback,\" Advances in neural information processing systems, vol. 35, pp. 27730-27744, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.243, + 0.493, + 0.316 + ], + "angle": 0, + "content": "[2] H. Touvron, T. Lavril, G. Izacard, X. Martinet, M.-A. Lachaux, T. Lacroix, B. Rozière, N. Goyal, E. Hambro, F. Azhar et al., \"Llama: Open and efficient foundation language models,\" arXiv preprint arXiv:2302.13971, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.316, + 0.493, + 0.36 + ], + "angle": 0, + "content": "[3] J. Bai, S. Bai, Y. Chu, Z. Cui, K. Dang, X. Deng, Y. Fan, W. Ge, Y. Han, F. Huang et al., \"Qwen technical report,\" arXiv preprint arXiv:2309.16609, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.36, + 0.493, + 0.417 + ], + "angle": 0, + "content": "[4] A. Liu, B. Feng, B. Xue, B. Wang, B. Wu, C. Lu, C. Zhao, C. Deng, C. Zhang, C. Ruan et al., \"Deepseek-v3 technical report,\" arXiv preprint arXiv:2412.19437, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.418, + 0.493, + 0.49 + ], + "angle": 0, + "content": "[5] D. Guo, D. Yang, H. Zhang, J. Song, R. Zhang, R. Xu, Q. Zhu, S. Ma, P. Wang, X. 
Bi et al., \"Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning,\" arXiv preprint arXiv:2501.12948, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.491, + 0.493, + 0.549 + ], + "angle": 0, + "content": "[6] W. X. Zhao, K. Zhou, J. Li, T. Tang, X. Wang, Y. Hou, Y. Min, B. Zhang, J. Zhang, Z. Dong et al., \"A survey of large language models,\" arXiv preprint arXiv:2303.18223, vol. 1, no. 2, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.549, + 0.493, + 0.621 + ], + "angle": 0, + "content": "[7] Y. Chang, X. Wang, J. Wang, Y. Wu, L. Yang, K. Zhu, H. Chen, X. Yi, C. Wang, Y. Wang et al., \"A survey on evaluation of large language models,\" ACM transactions on intelligent systems and technology, vol. 15, no. 3, pp. 1-45, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.622, + 0.493, + 0.695 + ], + "angle": 0, + "content": "[8] M. U. Hadi, R. Qureshi, A. Shah, M. Irfan, A. Zafar, M. B. Shaikh, N. Akhtar, J. Wu, S. Mirjalili et al., \"A survey on large language models: Applications, challenges, limitations, and practical usage,\" Authorea Preprints, vol. 3, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.695, + 0.493, + 0.767 + ], + "angle": 0, + "content": "[9] Y. Yan, S. Wang, J. Huo, J. Ye, Z. Chu, X. Hu, P. S. Yu, C. Gomes, B. Selman, and Q. Wen, \"Position: Multimodal large language models can significantly advance scientific reasoning,\" arXiv preprint arXiv:2502.02871, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.768, + 0.493, + 0.841 + ], + "angle": 0, + "content": "[10] Y. Yan, J. Su, J. He, F. Fu, X. Zheng, Y. Lyu, K. Wang, S. Wang, Q. Wen, and X. Hu, “A survey of mathematical reasoning in the era of multimodal large language model: Benchmark, method & challenges,” arXiv preprint arXiv:2412.11936, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.841, + 0.493, + 0.914 + ], + "angle": 0, + "content": "[11] X. Zou, Y. Yan, X. Hao, Y. Hu, H. 
Wen, E. Liu, J. Zhang, Y. Li, T. Li, Y. Zheng et al., \"Deep learning for cross-domain data fusion in urban computing: Taxonomy, advances, and outlook,\" Information Fusion, vol. 113, p. 102606, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.074, + 0.914, + 0.493, + 0.944 + ], + "angle": 0, + "content": "[12] Y. Li, X. Zhang, L. Luo, H. Chang, Y. Ren, I. King, and J. Li, “G-refer: Graph retrieval-augmented large" + }, + { + "type": "list", + "bbox": [ + 0.074, + 0.155, + 0.493, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.054, + 0.923, + 0.083 + ], + "angle": 0, + "content": "language model for explainable recommendation,\" arXiv preprint arXiv:2502.12586, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.083, + 0.923, + 0.142 + ], + "angle": 0, + "content": "[13] S. Sun, R. Liu, J. Lyu, J.-W. Yang, L. Zhang, and X. Li, \"A large language model-driven reward design framework via dynamic feedback for reinforcement learning,\" arXiv preprint arXiv:2410.14660, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.142, + 0.923, + 0.228 + ], + "angle": 0, + "content": "[14] S. Sonko, A. O. Adewusi, O. C. Obi, S. Onwusinkwue, and A. Atadoga, “A critical review towards artificial general intelligence: Challenges, ethical considerations, and the path forward,” World Journal of Advanced Research and Reviews, vol. 21, no. 3, pp. 1262-1268, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.228, + 0.923, + 0.302 + ], + "angle": 0, + "content": "[15] S. McLean, G. J. Read, J. Thompson, C. Baber, N. A. Stanton, and P. M. Salmon, \"The risks associated with artificial general intelligence: A systematic review,\" Journal of Experimental & Theoretical Artificial Intelligence, vol. 35, no. 5, pp. 649-663, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.302, + 0.923, + 0.36 + ], + "angle": 0, + "content": "[16] R. Liu, J. Gao, J. Zhao, K. Zhang, X. Li, B. Qi, W. 
Ouyang, and B. Zhou, \"Can 1b llm surpass 405b llm? rethinking compute-optimal test-time scaling,\" arXiv preprint arXiv:2502.06703, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.36, + 0.923, + 0.433 + ], + "angle": 0, + "content": "[17] J. Ruan, Y. Chen, B. Zhang, Z. Xu, T. Bao, H. Mao, Z. Li, X. Zeng, R. Zhao et al., \"Tptu: Task planning and tool usage of large language model-based ai agents,\" in NeurIPS 2023 Foundation Models for Decision Making Workshop, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.433, + 0.923, + 0.505 + ], + "angle": 0, + "content": "[18] V. Sorin, E. Klang, M. Sklair-Levy, I. Cohen, D. B. Zippel, N. Balint Lahat, E. Konen, and Y. Barash, \"Large language model (chatgpt) as a support tool for breast tumor board,\" NPJ Breast Cancer, vol. 9, no. 1, p. 44, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.505, + 0.923, + 0.579 + ], + "angle": 0, + "content": "[19] R. Yang, L. Song, Y. Li, S. Zhao, Y. Ge, X. Li, and Y. Shan, \"Gpt4tools: Teaching large language model to use tools via self-instruction,\" Advances in Neural Information Processing Systems, vol. 36, pp. 71-995-72007, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.579, + 0.923, + 0.664 + ], + "angle": 0, + "content": "[20] T. Schick, J. Dwivedi-Yu, R. Dessi, R. Raileanu, M. Lomeli, E. Hambro, L. Zettlemoyer, N. Cancedda, and T. Scialom, \"Toolformer: Language models can teach themselves to use tools,\" Advances in Neural Information Processing Systems, vol. 36, pp. 68-59-68-551, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.665, + 0.923, + 0.738 + ], + "angle": 0, + "content": "[21] W. Zhong, L. Guo, Q. Gao, H. Ye, and Y. Wang, \"Memorybank: Enhancing large language models with long-term memory,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 17, 2024, pp. 19724-19731." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.738, + 0.923, + 0.798 + ], + "angle": 0, + "content": "[22] W. Wang, L. Dong, H. Cheng, X. Liu, X. Yan, J. Gao, and F. Wei, \"Augmenting language models with long-term memory,\" Advances in Neural Information Processing Systems, vol. 36, pp. 74530-74543, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.798, + 0.923, + 0.856 + ], + "angle": 0, + "content": "[23] Z. Zhang, X. Bo, C. Ma, R. Li, X. Chen, Q. Dai, J. Zhu, Z. Dong, and J.-R. Wen, \"A survey on the memory mechanism of large language model based agents,\" arXiv preprint arXiv:2404.13501, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.856, + 0.923, + 0.914 + ], + "angle": 0, + "content": "[24] J. Huo, Y. Yan, B. Hu, Y. Yue, and X. Hu, \"Mmneuron: Discovering neuron-level domain-specific interpretation in multimodal large language model,\" arXiv preprint arXiv:2406.11193, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.914, + 0.923, + 0.944 + ], + "angle": 0, + "content": "[25] W. Liu, X. Huang, X. Zeng, X. Hao, S. Yu, D. Li, S. Wang, W. Gan, Z. Liu, Y. Yu et al., \"Toolace: Win" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.033, + 0.922, + 0.043 + ], + "angle": 0, + "content": "37" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.492, + 0.083 + ], + "angle": 0, + "content": "ning the points of llm function calling,\" arXiv preprint arXiv:2409.00920, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.084, + 0.492, + 0.141 + ], + "angle": 0, + "content": "[26] Q. Tang, Z. Deng, H. Lin, X. Han, Q. Liang, B. Cao, and L. 
Sun, \"Toolalpaca: Generalized tool learning for language models with 3000 simulated cases,\" arXiv preprint arXiv:2306.05301, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.142, + 0.492, + 0.2 + ], + "angle": 0, + "content": "[27] T. Guo, X. Chen, Y. Wang, R. Chang, S. Pei, N. V. Chawla, O. Wiest, and X. Zhang, \"Large language model based multi-agents: A survey of progress and challenges,\" arXiv preprint arXiv:2402.01680, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.2, + 0.492, + 0.272 + ], + "angle": 0, + "content": "[28] L. Wang, C. Ma, X. Feng, Z. Zhang, H. Yang, J. Zhang, Z. Chen, J. Tang, X. Chen, Y. Lin et al., \"A survey on large language model based autonomous agents,\" Frontiers of Computer Science, vol. 18, no. 6, p. 186345, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.272, + 0.492, + 0.346 + ], + "angle": 0, + "content": "[29] Z. Xi, W. Chen, X. Guo, W. He, Y. Ding, B. Hong, M. Zhang, J. Wang, S. Jin, E. Zhou et al., \"The rise and potential of large language model based agents: A survey,\" Science China Information Sciences, vol. 68, no. 2, p. 121101, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.346, + 0.492, + 0.418 + ], + "angle": 0, + "content": "[30] Y. Yan and J. Lee, \"Georeasoner: Reasoning on geospatially grounded context for natural language understanding,\" in Proceedings of the 33rd ACM International Conference on Information and Knowledge Management, 2024, pp. 4163-4167." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.418, + 0.492, + 0.504 + ], + "angle": 0, + "content": "[31] A. Majumdar, K. Yadav, S. Arnaud, J. Ma, C. Chen, S. Silwal, A. Jain, V-P. Berges, T. Wu, J. Vakil et al., \"Where are we in the search for an artificial visual cortex for embodied intelligence?\" Advances in Neural Information Processing Systems, vol. 36, pp. 655-677, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.505, + 0.492, + 0.579 + ], + "angle": 0, + "content": "[32] M. Zhou, H. Dong, H. Song, N. Zheng, W.-H. Chen, and H. Wang, \"Embodied intelligence-based perception, decision-making, and control for autonomous operations of rail transportation,\" IEEE Transactions on Intelligent Vehicles, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.579, + 0.492, + 0.637 + ], + "angle": 0, + "content": "[33] X. Ma, Y. Gao, Y. Wang, R. Wang, X. Wang, Y. Sun, Y. Ding, H. Xu, Y. Chen, Y. Zhao et al., \"Safety at scale: A comprehensive survey of large model safety,\" arXiv preprint arXiv:2502.05206, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.637, + 0.492, + 0.709 + ], + "angle": 0, + "content": "[34] K. Kumar, T. Ashraf, O. Thawakar, R. M. Anwer, H. Cholakkal, M. Shah, M.-H. Yang, P. H. Torr, S. Khan, and F. S. Khan, \"Llm post-training: A deep dive into reasoning large language models,\" arXiv preprint arXiv:2502.21321, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.709, + 0.492, + 0.782 + ], + "angle": 0, + "content": "[35] Z.-Z. Li, D. Zhang, M.-L. Zhang, J. Zhang, Z. Liu, Y. Yao, H. Xu, J. Zheng, P.-J. Wang, X. Chen et al., \"From system 1 to system 2: A survey of reasoning large language models,\" arXiv preprint arXiv:2502.17419, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.782, + 0.492, + 0.855 + ], + "angle": 0, + "content": "[36] Y. Chen, W. Sun, C. Fang, Z. Chen, Y. Ge, T. Han, Q. Zhang, Y. Liu, Z. Chen, and B. Xu, \"Security of language models for code: A systematic literature review,\" ACM Transactions on Software Engineering and Methodology, vol. 1, no. 1, pp. 1-66, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.855, + 0.492, + 0.913 + ], + "angle": 0, + "content": "[37] W. Qu, Y. Zhou, Y. Wu, T. Xiao, B. Yuan, Y. Li, and J. 
Zhang, \"Prompt inversion attack against collaborative inference of large language models,\" in IEEE S&P, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.913, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[38] J. Wu, S. Yang, R. Zhan, Y. Yuan, L. S. Chao, and D. F. Wong, \"A survey on llm-generated text detection: Ne" + }, + { + "type": "list", + "bbox": [ + 0.076, + 0.054, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.922, + 0.083 + ], + "angle": 0, + "content": "cessity, methods, and future directions,\" Computational Linguistics, pp. 1-66, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.083, + 0.922, + 0.127 + ], + "angle": 0, + "content": "[39] H. Wang, J. Li, H. Wu, E. Hovy, and Y. Sun, \"Pre-trained language models and their applications,\" *Engineering*, vol. 25, pp. 51-65, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.127, + 0.922, + 0.199 + ], + "angle": 0, + "content": "[40] C. Zhou, Q. Li, C. Li, J. Yu, Y. Liu, G. Wang, K. Zhang, C. Ji, Q. Yan, L. He et al., \"A comprehensive survey on pretrained foundation models: A history from bert to chatgpt,\" International Journal of Machine Learning and Cybernetics, pp. 1-65, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.199, + 0.922, + 0.243 + ], + "angle": 0, + "content": "[41] X. Zhang, X. Zhu, and L. Lessard, \"Online data poisoning attacks,\" in Learning for Dynamics and Control. PMLR, 2020, pp. 201-210." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.243, + 0.922, + 0.331 + ], + "angle": 0, + "content": "[42] M. Goldblum, D. Tsipras, C. Xie, X. Chen, A. Schwarzschild, D. Song, A. Madry, B. Li, and T. Goldstein, \"Dataset security for machine learning: Data poisoning, backdoor attacks, and defenses,\" IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 45, no. 2, pp. 1563-1580, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.331, + 0.922, + 0.403 + ], + "angle": 0, + "content": "[43] N. Lukas, A. Salem, R. Sim, S. Tople, L. Wutschitz, and S. Zanella-Béguelin, \"Analyzing leakage of personally identifiable information in language models,\" in 2023 IEEE Symposium on Security and Privacy (SP). IEEE, 2023, pp. 346-363." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.403, + 0.922, + 0.505 + ], + "angle": 0, + "content": "[44] W. Sun, Y. Chen, C. Fang, Y. Feng, Y. Xiao, A. Guo, Q. Zhang, Y. Liu, B. Xu, and Z. Chen, \"Eliminating backdoors in neural code models for secure code understanding,\" in Proceedings of the 33rd ACM International Conference on the Foundations of Software Engineering. Trondheim, Norway: ACM, Mon 23 - Fri 27 June 2025, pp. 1-23." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.505, + 0.922, + 0.578 + ], + "angle": 0, + "content": "[45] H. R. Kirk, B. Vidgen, P. Röttger, and S. A. Hale, \"The benefits, risks and bounds of personalizing the alignment of large language models to individuals,\" Nature Machine Intelligence, vol. 6, no. 4, pp. 383-392, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.578, + 0.922, + 0.651 + ], + "angle": 0, + "content": "[46] Z. Zhou, H. Yu, X. Zhang, R. Xu, F. Huang, and Y. Li, \"How alignment and jailbreak work: Explain llm safety through intermediate hidden states,\" in Findings of the Association for Computational Linguistics: EMNLP 2024, 2024, pp. 2461-2488." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.651, + 0.922, + 0.724 + ], + "angle": 0, + "content": "[47] X. Qi, Y. Zeng, T. Xie, P.-Y. Chen, R. Jia, P. Mittal, and P. Henderson, \"Fine-tuning aligned language models compromises safety, even when users do not intend to!\" in ICLR, 2024. [Online]. Available: https://openreview.net/forum?id=hTEGyKf0dZ" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.724, + 0.922, + 0.811 + ], + "angle": 0, + "content": "[48] X. Qi, A. Panda, K. 
Lyu, X. Ma, S. Roy, A. Beirami, P. Mittal, and P. Henderson, \"Safety alignment should be made more than just a few tokens deep,\" in The Thirteen International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=6Mxhg9PtDE" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.811, + 0.922, + 0.885 + ], + "angle": 0, + "content": "[49] D. Halawi, A. Wei, E. Wallace, T. T. Wang, N. Hagh-talab, and J. Steinhardt, \"Covert malicious finetuning: Challenges in safeguarding LLM adaptation,\" in Proceedings of the 41st International Conference on Machine Learning. PMLR, 2024, pp. 17298-17312." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.885, + 0.922, + 0.928 + ], + "angle": 0, + "content": "[50] W. Hawkins, B. Mittelstadt, and C. Russell, \"The effect of fine-tuning on language model toxicity,\" in Neurips Safe Generative AI Workshop 2024, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.928, + 0.922, + 0.944 + ], + "angle": 0, + "content": "[51] J. Huang and J. Zhang, \"A survey on evaluation of" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.922, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "38" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.49, + 0.083 + ], + "angle": 0, + "content": "multimodal large language models,\" arXiv preprint arXiv:2408.15769, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.083, + 0.492, + 0.142 + ], + "angle": 0, + "content": "[52] P. Röttger, F. Pernisi, B. Vidgen, and D. Hovy, \"Safetyprompts: a systematic review of open datasets for evaluating and improving large language model safety,\" arXiv preprint arXiv:2404.05399, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.142, + 0.492, + 0.199 + ], + "angle": 0, + "content": "[53] Y. Dong, R. Mu, Y. Zhang, S. Sun, T. Zhang, C. Wu, G. Jin, Y. Qi, J. Hu, J. Meng et al., \"Safeguarding large language models: A survey,\" arXiv preprint arXiv:2406.02622, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.2, + 0.492, + 0.258 + ], + "angle": 0, + "content": "[54] Y. Wang, Y. Pan, Q. Zhao, Y. Deng, Z. Su, L. Du, and T. H. Luan, \"Large model agents: State-of-the-art, cooperation paradigms, security and privacy, and future trends,\" arXiv preprint arXiv:2409.14457, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.259, + 0.492, + 0.316 + ], + "angle": 0, + "content": "[55] G. Zhang, K. Chen, G. Wan, H. Chang, H. Cheng, K. Wang, S. Hu, and L. Bai, \"Evoflow: Evolving diverse agentic workflows on the fly,\" arXiv preprint arXiv:2502.07373, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.317, + 0.492, + 0.36 + ], + "angle": 0, + "content": "[56] G. Zhang, L. Niu, J. Fang, K. Wang, L. Bai, and X. Wang, \"Multi-agent architecture search via agentic supernet,\" arXiv preprint arXiv:2502.04180, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.36, + 0.492, + 0.432 + ], + "angle": 0, + "content": "[57] G. Zhang, Y. Yue, Z. Li, S. Yun, G. Wan, K. Wang, D. Cheng, J. X. Yu, and T. Chen, \"Cut the crap: An economical communication pipeline for llm-based multi-agent systems,\" arXiv preprint arXiv:2410.02506, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.433, + 0.492, + 0.491 + ], + "angle": 0, + "content": "[58] Y. Yue, G. Zhang, B. Liu, G. Wan, K. Wang, D. Cheng, and Y. Qi, \"Masrouter: Learning to route llms for multi-agent systems,\" 2025. [Online]. Available: https://arxiv.org/abs/2502.11133" + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.491, + 0.492, + 0.564 + ], + "angle": 0, + "content": "[59] Z. Liang, Y. Xu, Y. Hong, P. Shang, Q. Wang, Q. Fu, and K. 
Liu, \"A survey of multimodel large language models,\" in Proceedings of the 3rd International Conference on Computer, Artificial Intelligence and Control Engineering, 2024, pp. 405-409." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.565, + 0.492, + 0.622 + ], + "angle": 0, + "content": "[60] S. Zhang, L. Dong, X. Li, S. Zhang, X. Sun, S. Wang, J. Li, R. Hu, T. Zhang, F. Wu et al., \"Instruction tuning for large language models: A survey,\" arXiv preprint arXiv:2308.10792, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.622, + 0.492, + 0.695 + ], + "angle": 0, + "content": "[61] H. Zhao, H. Chen, F. Yang, N. Liu, H. Deng, H. Cai, S. Wang, D. Yin, and M. Du, \"Explainability for large language models: A survey,\" ACM Transactions on Intelligent Systems and Technology, vol. 15, no. 2, pp. 1-38, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.695, + 0.492, + 0.752 + ], + "angle": 0, + "content": "[62] T. Shen, R. Jin, Y. Huang, C. Liu, W. Dong, Z. Guo, X. Wu, Y. Liu, and D. Xiong, \"Large language model alignment: A survey,\" arXiv preprint arXiv:2309.15025, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.753, + 0.492, + 0.84 + ], + "angle": 0, + "content": "[63] M. A. K. Raiaan, M. S. H. Mukta, K. Fatema, N. M. Fahad, S. Sakib, M. M. J. Mim, J. Ahmad, M. E. Ali, and S. Azam, \"A review on large language models: Architectures, applications, taxonomies, open issues and challenges,\" IEEE access, vol. 12, pp. 26839-26874, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.841, + 0.492, + 0.885 + ], + "angle": 0, + "content": "[64] K. S. Kalyan, \"A survey of gpt-3 family large language models including chatgpt and gpt-4,\" Natural Language Processing Journal, vol. 6, p. 100048, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.885, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[65] E. Shayegani, M. A. A. Mamun, Y. Fu, P. Zaree, Y. Dong, and N. 
Abu-Ghazaleh, \"Survey of vulnerabilities in large language models revealed by adversarial attacks,\" arXiv preprint arXiv:2310.10844, 2023." + }, + { + "type": "list", + "bbox": [ + 0.076, + 0.054, + 0.492, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.113 + ], + "angle": 0, + "content": "[66] Y. Yao, J. Duan, K. Xu, Y. Cai, Z. Sun, and Y. Zhang, \"A survey on large language model (llm) security and privacy: The good, the bad, and the ugly,\" High-Confidence Computing, p. 100211, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.113, + 0.923, + 0.17 + ], + "angle": 0, + "content": "[67] L. Qin, Q. Chen, Y. Zhou, Z. Chen, Y. Li, L. Liao, M. Li, W. Che, and P. S. Yu, \"Multilingual large language model: A survey of resources, taxonomy and frontiers,\" arXiv preprint arXiv:2404.04925, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.17, + 0.923, + 0.243 + ], + "angle": 0, + "content": "[68] M. U. Hadi, R. Qureshi, A. Shah, M. Irfan, A. Zafar, M. B. Shaikh, N. Akhtar, J. Wu, S. Mirjalili et al., \"Large language models: a comprehensive survey of its applications, challenges, limitations, and future prospects,\" Authorea Preprints, vol. 1, pp. 1-26, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.243, + 0.923, + 0.302 + ], + "angle": 0, + "content": "[69] L. Sun, Y. Huang, H. Wang, S. Wu, Q. Zhang, C. Gao, Y. Huang, W. Lyu, Y. Zhang, X. Li et al., \"Trustllm: Trustworthiness in large language models,\" arXiv preprint arXiv:2401.05561, vol. 3, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.302, + 0.923, + 0.346 + ], + "angle": 0, + "content": "[70] B. C. Das, M. H. Amini, and Y. Wu, \"Security and privacy challenges of large language models: A survey,\" ACM Computing Surveys, vol. 57, no. 6, pp. 1-39, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.346, + 0.923, + 0.403 + ], + "angle": 0, + "content": "[71] F. He, T. Zhu, D. 
Ye, B. Liu, W. Zhou, and P. S. Yu, \"The emerged security and privacy of llm agent: A survey with case studies,\" arXiv preprint arXiv:2407.19354, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.403, + 0.923, + 0.462 + ], + "angle": 0, + "content": "[72] G. Tie, Z. Zhao, D. Song, F. Wei, R. Zhou, Y. Dai, W. Yin, Z. Yang, J. Yan, Y. Su et al., \"A survey on post-training of large language models,\" arXiv preprint arXiv:2503.06072, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.462, + 0.923, + 0.535 + ], + "angle": 0, + "content": "[73] Y. Huang, C. Gao, S. Wu, H. Wang, X. Wang, Y. Zhou, Y. Wang, J. Ye, J. Shi, Q. Zhang et al., \"On the trustworthiness of generative foundation models: Guideline, assessment, and perspective,\" arXiv preprint arXiv:2502.14296, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.535, + 0.923, + 0.593 + ], + "angle": 0, + "content": "[74] M. Yu, F. Meng, X. Zhou, S. Wang, J. Mao, L. Pang, T. Chen, K. Wang, X. Li, Y. Zhang et al., \"A survey on trustworthy llm agents: Threats and countermeasures,\" arXiv preprint arXiv:2503.09648, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.593, + 0.923, + 0.739 + ], + "angle": 0, + "content": "[75] X. Ma, Y. Gao, Y. Wang, R. Wang, X. Wang, Y. Sun, Y. Ding, H. Xu, Y. Chen, Y. Zhao, H. Huang, Y. Li, J. Zhang, X. Zheng, Y. Bai, Z. Wu, X. Qiu, J. Zhang, Y. Li, J. Sun, C. Wang, J. Gu, B. Wu, S. Chen, T. Zhang, Y. Liu, M. Gong, T. Liu, S. Pan, C. Xie, T. Pang, Y. Dong, R. Jia, Y. Zhang, S. Ma, X. Zhang, N. Gong, C. Xiao, S. Erfani, B. Li, M. Sugiyama, D. Tao, J. Bailey, and Y.-G. Jiang, \"Safety at scale: A comprehensive survey of large model safety,\" 2025. [Online]. Available: https://arxiv.org/abs/2502.05206" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.739, + 0.923, + 0.812 + ], + "angle": 0, + "content": "[76] Y. Huang, L. Sun, H. Wang, S. Wu, Q. Zhang, Y. Li, C. Gao, Y. Huang, W. Lyu, Y. 
Zhang et al., \"Position: Trustllm: Trustworthiness in large language models,\" in International Conference on Machine Learning. PMLR, 2024, pp. 20166-20270." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.812, + 0.923, + 0.869 + ], + "angle": 0, + "content": "[77] Z. Dong, Z. Zhou, C. Yang, J. Shao, and Y. Qiao, \"Attacks, defenses and evaluations for llm conversation safety: A survey,\" arXiv preprint arXiv:2402.09283, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.87, + 0.923, + 0.943 + ], + "angle": 0, + "content": "[78] G. Penedo, Q. Malartic, D. Hesslow, R. Cojocaru, A. Cappelli, H. Alobeidli, B. Pannier, E. Almazrouei, and J. Launay, \"The refined web dataset for falcon llm: outperforming curated corpora with web data, and web data only,\" arXiv preprint arXiv:2306.01116, 2023." + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "39" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.127 + ], + "angle": 0, + "content": "[79] L. Soldaini, R. Kinney, A. Bhagia, D. Schwenk, D. Atkinson, R. Authur, B. Bogin, K. Chandu, J. Dumas, Y. Elazar et al., \"Dolma: An open corpus of three trillion tokens for language model pretraining research,\" arXiv preprint arXiv:2402.00159, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.127, + 0.492, + 0.184 + ], + "angle": 0, + "content": "[80] J. Kaddour, J. Harris, M. Mozes, H. Bradley, R. Raileanu, and R. McHardy, \"Challenges and applications of large language models,\" arXiv preprint arXiv:2307.10169, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.185, + 0.492, + 0.272 + ], + "angle": 0, + "content": "[81] W. Sun, Y. Chen, G. Tao, C. Fang, X. Zhang, Q. Zhang, and B. Luo, \"Backdooring neural code search,\" in Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics. Toronto, Canada: Association for Computational Linguistics, July 9-14 2023, pp. 9692-9708." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.273, + 0.492, + 0.375 + ], + "angle": 0, + "content": "[82] W. Sun, Y. Chen, M. Yuan, C. Fan, Z. Chen, C. Wang, Y. Liu, B. Xu, and Z. Chen, \"Show me your code! kill code poisoning: A lightweight method based on code naturalness,\" in Proceedings of the IEEE/ACM 47th International Conference on Software Engineering. Ottawa, Ontario, Canada: IEEE Computer Society, Sun 27 April - Sat 3 May 2025, pp. 1-13." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.375, + 0.492, + 0.447 + ], + "angle": 0, + "content": "[83] N. Carlini, M. Jagielski, C. A. Choquette-Choo, D. Paleka, W. Pearce, H. Anderson, A. Terzis, K. Thomas, and F. Tramèr, \"Poisoning web-scale training datasets is practical,\" in 2024 IEEE Symposium on Security and Privacy (SP). IEEE, 2024, pp. 407-425." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.447, + 0.492, + 0.505 + ], + "angle": 0, + "content": "[84] Y. Zhang, J. Rando, I. Evtimov, J. Chi, E. M. Smith, N. Carlini, F. Tramér, and D. Ippolito, \"Persistent pre-training poisoning of llms,\" arXiv preprint arXiv:2410.13722, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.506, + 0.492, + 0.55 + ], + "angle": 0, + "content": "[85] E. Wallace, T. Z. Zhao, S. Feng, and S. Singh, \"Concealed data poisoning attacks on nlp models,\" arXiv preprint arXiv:2010.12563, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.55, + 0.492, + 0.607 + ], + "angle": 0, + "content": "[86] B. Yan, K. Li, M. Xu, Y. Dong, Y. Zhang, Z. Ren, and X. 
Cheng, \"On protecting the data privacy of large language models (llms): A survey,\" arXiv preprint arXiv:2403.05156, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.608, + 0.492, + 0.666 + ], + "angle": 0, + "content": "[87] N. Kandpal, E. Wallace, and C. Raffel, \"Deduplicating training data mitigates privacy risks in language models,\" in International Conference on Machine Learning. PMLR, 2022, pp. 10697-10707." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.666, + 0.492, + 0.724 + ], + "angle": 0, + "content": "[88] N. Carlini, D. Ippolito, M. Jagielski, K. Lee, F. Tramer, and C. Zhang, “Quantifying memorization across neural language models,” in The Eleventh International Conference on Learning Representations, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.724, + 0.492, + 0.782 + ], + "angle": 0, + "content": "[89] C. Arnett, E. Jones, I. P. Yamshchikov, and P.-C. Langlais, \"Toxicity of the commons: Curating open-source pre-training data,\" arXiv preprint arXiv:2410.22587, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.782, + 0.492, + 0.841 + ], + "angle": 0, + "content": "[90] K. Lee, D. Ippolito, A. Nystrom, C. Zhang, D. Eck, C. Callison-Burch, and N. Carlini, “Deduplicating training data makes language models better,” arXiv preprint arXiv:2107.06499, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.841, + 0.492, + 0.885 + ], + "angle": 0, + "content": "[91] Y. Li, Y. Jiang, Z. Li, and S. Xia, \"Backdoor learning: A survey.\" IEEE Transactions on Neural Networks and Learning Systems, vol. 35, no. 1, pp. 5-22, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.885, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[92] Y. Zeng, M. Pan, H. Jahagirdar, M. Jin, L. Lyu, and R. Jia, \"How to sift out a clean data subset in the presence of data poisoning?\" arXiv preprint arXiv:2210.06516, 2022." 
+ }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.112 + ], + "angle": 0, + "content": "[93] M. Pan, Y. Zeng, L. Lyu, X. Lin, and R. Jia, “{ASSET}: Robust backdoor data detection across a multiplicity of deep learning paradigms,” in 32nd USENIX Security Symposium (USENIX Security 23), 2023, pp. 2725–2742." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.112, + 0.923, + 0.17 + ], + "angle": 0, + "content": "[94] Z. Zhang, L. Lyu, W. Wang, L. Sun, and X. Sun, \"How to inject backdoors with better consistency: Logit anchoring on clean data,\" in International Conference on Learning Representations, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.17, + 0.923, + 0.214 + ], + "angle": 0, + "content": "[95] Z. Zhang, L. Lyu, X. Ma, C. Wang, and X. Sun, \"Fine-mixing: Mitigating backdoors in fine-tuned language models,\" arXiv preprint arXiv:2210.09545, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.214, + 0.923, + 0.287 + ], + "angle": 0, + "content": "[96] X. Sun, X. Li, Y. Meng, X. Ao, L. Lyu, J. Li, and T. Zhang, \"Defending against backdoor attacks in natural language generation,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 37, no. 4, 2023, pp. 5257-5265." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.287, + 0.923, + 0.404 + ], + "angle": 0, + "content": "[97] S. Longpre, G. Yauney, E. Reif, K. Lee, A. Roberts, B. Zoph, D. Zhou, J. Wei, K. Robinson, D. Mimno et al., \"A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity,\" in Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), 2024, pp. 3245-3276." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.404, + 0.923, + 0.447 + ], + "angle": 0, + "content": "[98] S. Neel and P. Chang, \"Privacy issues in large language models: A survey,\" arXiv preprint arXiv:2312.06717, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.447, + 0.923, + 0.492 + ], + "angle": 0, + "content": "[99] X. Wu, R. Duan, and J. Ni, \"Unveiling security, privacy, and ethical concerns of chatgpt,\" Journal of Information and Intelligence, vol. 2, no. 2, pp. 102-115, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.491, + 0.923, + 0.55 + ], + "angle": 0, + "content": "[100] M. Gupta, C. Akiri, K. Aryal, E. Parker, and L. Praharaj, \"From chatgpt to threatgpt: Impact of generative ai in cybersecurity and privacy,\" IEEE Access, vol. 11, pp. 80218-80245, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.55, + 0.923, + 0.608 + ], + "angle": 0, + "content": "[101] M. Miranda, E. S. Ruzzetti, A. Santilli, F. M. Zanzotto, S. Bratières, and E. Rodolà, “Preserving privacy in large language models: A survey on current threats and solutions,” arXiv preprint arXiv:2408.05212, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.608, + 0.923, + 0.68 + ], + "angle": 0, + "content": "[102] Q. Zhang, H. Qiu, D. Wang, Y. Li, T. Zhang, W. Zhu, H. Weng, L. Yan, and C. Zhang, “A benchmark for semantic sensitive information in llms outputs,” in The Thirteenth International Conference on Learning Representations, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.68, + 0.923, + 0.724 + ], + "angle": 0, + "content": "[103] S. Kim, S. Yun, H. Lee, M. Gubri, S. Yoon, and S. J. Oh, \"Propile: C,\" Advances in Neural Information Processing Systems, vol. 36, pp. 20750-20762, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.724, + 0.923, + 0.768 + ], + "angle": 0, + "content": "[104] H. Li, D. Guo, W. Fan, M. Xu, J. Huang, F. Meng, and Y. 
Song, \"Multi-step jailbreaking privacy attacks on chatgpt,\" arXiv preprint arXiv:2304.05197, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.768, + 0.923, + 0.84 + ], + "angle": 0, + "content": "[105] M. S. Ozdayi, C. Peris, J. FitzGerald, C. Dupuy, J. Majmudar, H. Khan, R. Parikh, and R. Gupta, \"Controlling the extraction of memorized data from large language models via prompt-tuning,\" arXiv preprint arXiv:2305.11759, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.84, + 0.923, + 0.913 + ], + "angle": 0, + "content": "[106] N. Carlini, C. Liu, U. Erlingsson, J. Kos, and D. Song, \"The secret sharer: Evaluating and testing unintended memorization in neural networks,\" in 28th USENIX security symposium (USENIX security 19), 2019, pp. 267-284." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.913, + 0.923, + 0.943 + ], + "angle": 0, + "content": "[107] M. Nasr, N. Carlini, J. Hayase, M. Jagielski, A. F. Cooper, D. Ippolito, C. A. Choquette-Choo, E. Wallace," + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "40" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.492, + 0.098 + ], + "angle": 0, + "content": "F. Tramér, and K. Lee, \"Scalable extraction of training data from (production) language models,\" arXiv preprint arXiv:2311.17035, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.098, + 0.492, + 0.171 + ], + "angle": 0, + "content": "[108] N. Carlini, F. Tramer, E. Wallace, M. Jagielski, A. Herbert-Voss, K. Lee, A. Roberts, T. Brown, D. Song, U. 
Erlingsson et al., \"Extracting training data from large language models,\" in 30th USENIX security symposium (USENIX Security 21), 2021, pp. 2633-2650." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.171, + 0.492, + 0.228 + ], + "angle": 0, + "content": "[109] Y. Bai, G. Pei, J. Gu, Y. Yang, and X. Ma, \"Special characters attack: Toward scalable training data extraction from large language models,\" arXiv preprint arXiv:2405.05990, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.228, + 0.492, + 0.3 + ], + "angle": 0, + "content": "[110] Z. Zhou, J. Xiang, C. Chen, and S. Su, “Quantifying and analyzing entity-level memorization in large language models,” in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 17, 2024, pp. 19741-19749." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.301, + 0.492, + 0.374 + ], + "angle": 0, + "content": "[111] X. Yang, Z. Wen, W. Qu, Z. Chen, Z. Xiang, B. Chen, and H. Yao, “Memorization and privacy risks in domain-specific large language models,” in ICLR 2024 Workshop on Reliable and Responsible Foundation Models, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.374, + 0.492, + 0.433 + ], + "angle": 0, + "content": "[112] R. Shokri, M. Stronati, C. Song, and V. Shmatikov, \"Membership inference attacks against machine learning models,\" in 2017 IEEE symposium on security and privacy (SP). IEEE, 2017, pp. 3-18." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.433, + 0.492, + 0.492 + ], + "angle": 0, + "content": "[113] H. Hu, Z. Salcic, L. Sun, G. Dobbie, P. S. Yu, and X. Zhang, \"Membership inference attacks on machine learning: A survey,\" ACM Computing Surveys (CSUR), vol. 54, no. 11s, pp. 1-37, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.491, + 0.492, + 0.55 + ], + "angle": 0, + "content": "[114] N. Carlini, S. Chien, M. Nasr, S. Song, A. Terzis, and F. 
Tramer, \"Membership inference attacks from first principles,\" in 2022 IEEE symposium on security and privacy (SP). IEEE, 2022, pp. 1897-1914." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.55, + 0.492, + 0.623 + ], + "angle": 0, + "content": "[115] J. Ye, A. Maddi, S. K. Murakonda, V. Bindschaedler, and R. Shokri, \"Enhanced membership inference attacks against machine learning models,\" in Proceedings of the 2022 ACM SIGSAC Conference on Computer and Communications Security, 2022, pp. 3093-3106." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.622, + 0.492, + 0.68 + ], + "angle": 0, + "content": "[116] J. Zhang, D. Das, G. Kamath, and F. Tramère, \"Membership inference attacks cannot prove that a model was trained on your data,\" arXiv preprint arXiv:2409.19798, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.681, + 0.492, + 0.753 + ], + "angle": 0, + "content": "[117] M. Duan, A. Suri, N. Mireshghallah, S. Min, W. Shi, L. Zettlemoyer, Y. Tsvetkov, Y. Choi, D. Evans, and H. Hajishirzi, \"Do membership inference attacks work on large language models?\" arXiv preprint arXiv:2402.07841, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.754, + 0.492, + 0.812 + ], + "angle": 0, + "content": "[118] M. Meeus, I. Shilov, S. Jain, M. Faysse, M. Rei, and Y.-A. de Montjoye, \"Sok: Membership inference attacks on llms are rushing nowhere (and how to fix it),\" arXiv preprint arXiv:2406.17975, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.812, + 0.492, + 0.898 + ], + "angle": 0, + "content": "[119] Y. He, B. Li, Y. Wang, M. Yang, J. Wang, H. Hu, and X. Zhao, \"Is difficulty calibration all we need? towards more practical membership inference attacks,\" in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 1226-1240." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.899, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[120] Y. He, B. Li, L. Liu, Z. Ba, W. 
Dong, Y. Li, Z. Qin, K. Ren, and C. Chen, \"Towards label-only membership inference attack against pre-trained large lan" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.841, + 0.069 + ], + "angle": 0, + "content": "guage models,\" in USENIX Security, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.069, + 0.923, + 0.142 + ], + "angle": 0, + "content": "[121] J. Ren, K. Chen, C. Chen, V. Sehwag, Y. Xing, J. Tang, and L. Lyu, \"Self-comparison for dataset-level membership inference in large (vision-) language model,\" in Proceedings of the ACM on Web Conference 2025, 2025, pp. 910-920." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.142, + 0.923, + 0.2 + ], + "angle": 0, + "content": "[122] A. Albalak, Y. Elazar, S. M. Xie, S. Longpre, N. Lambert, X. Wang, N. Muennighoff, B. Hou, L. Pan, H. Jeong et al., \"A survey on data selection for language models,\" arXiv preprint arXiv:2402.16827, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.2, + 0.923, + 0.258 + ], + "angle": 0, + "content": "[123] P. Maini, S. Goyal, D. Sam, A. Robey, Y. Savani, Y. Jiang, A. Zou, Z. C. Lipton, and J. Z. Kolter, \"Safety pretraining: Toward the next generation of safe ai,\" arXiv preprint arXiv:2504.16980, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.259, + 0.923, + 0.317 + ], + "angle": 0, + "content": "[124] A. Hurst, A. Lerer, A. P. Goucher, A. Perelman, A. Ramesh, A. Clark, A. Ostrow, A. Welihinda, A. Hayes, A. Radford et al., \"Gpt-4o system card,\" arXiv preprint arXiv:2410.21276, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.317, + 0.923, + 0.404 + ], + "angle": 0, + "content": "[125] S. Li, F. Liu, L. Cui, J. Lu, Q. Xiao, X. Yang, P. Liu, K. Sun, Z. Ma, and X. 
Wang, \"Safe planner: Empowering safety awareness in large pre-trained models for robot task planning,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 39, no. 14, 2025, pp. 14619-14627." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.404, + 0.923, + 0.462 + ], + "angle": 0, + "content": "[126] J. O'Neill, S. Subramanian, E. Lin, A. Satish, and V. Mugunthan, \"Guardformer: Guardrail instruction pretraining for efficient safeguarding,\" in Neurips Safe Generative AI Workshop 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.462, + 0.923, + 0.521 + ], + "angle": 0, + "content": "[127] T. Huang, S. Hu, F. Ilhan, S. F. Tekin, and L. Liu, \"Harmful fine-tuning attacks and defenses for large language models: A survey,\" arXiv preprint arXiv:2409.18169, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.521, + 0.923, + 0.579 + ], + "angle": 0, + "content": "[128] M. Shu, J. Wang, C. Zhu, J. Geiping, C. Xiao, and T. Goldstein, \"On the exploitability of instruction tuning,\" Advances in Neural Information Processing Systems, vol. 36, pp. 61-836-61-856, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.579, + 0.923, + 0.637 + ], + "angle": 0, + "content": "[129] J. Xu, M. D. Ma, F. Wang, C. Xiao, and M. Chen, \"Instructions as backdoors: Backdoor vulnerabilities of instruction tuning for large language models,\" arXiv preprint arXiv:2305.14710, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.637, + 0.923, + 0.708 + ], + "angle": 0, + "content": "[130] J. Yan, V. Yadav, S. Li, L. Chen, Z. Tang, H. Wang, V. Srinivasan, X. Ren, and H. Jin, \"Backdooring instruction-tuned large language models with virtual prompt injection,\" arXiv preprint arXiv:2307.16888, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.709, + 0.923, + 0.783 + ], + "angle": 0, + "content": "[131] H. Yao, J. Lou, and Z. 
Qin, \"Poisonprompt: Backdoor attack on prompt-based large language models,\" in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 7745-7749." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.783, + 0.923, + 0.841 + ], + "angle": 0, + "content": "[132] S. Zhao, J. Wen, L. A. Tuan, J. Zhao, and J. Fu, \"Prompt as triggers for backdoor attack: Examining the vulnerability in language models,\" arXiv preprint arXiv:2305.01219, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.841, + 0.923, + 0.899 + ], + "angle": 0, + "content": "[133] Z. Han, C. Gao, J. Liu, J. Zhang, and S. Q. Zhang, \"Parameter-efficient fine-tuning for large models: A comprehensive survey,\" arXiv preprint arXiv:2403.14608, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.899, + 0.923, + 0.944 + ], + "angle": 0, + "content": "[134] L. Xu, H. Xie, S.-Z. J. Qin, X. Tao, and F. L. Wang, \"Parameter-efficient fine-tuning methods for pretrained language models: A critical review and" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.921, + 0.043 + ], + "angle": 0, + "content": "41" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.466, + 0.068 + ], + "angle": 0, + "content": "assessment,\" arXiv preprint arXiv:2312.12148, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.069, + 0.492, + 0.142 + ], + "angle": 0, + "content": "[135] N. Ding, Y. Qin, G. Yang, F. Wei, Z. Yang, Y. Su, S. Hu, Y. Chen, C.-M. Chan, W. Chen et al., \"Parameter-efficient fine-tuning of large-scale pre-trained language models,\" Nature Machine Intelligence, vol. 5, no. 3, pp. 220-235, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.143, + 0.492, + 0.2 + ], + "angle": 0, + "content": "[136] S. Zhao, L. Gan, L. A. Tuan, J. Fu, L. Lyu, M. Jia, and J. Wen, \"Defending against weight-poisoning backdoor attacks for parameter-efficient fine-tuning,\" arXiv preprint arXiv:2402.12168, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.2, + 0.492, + 0.258 + ], + "angle": 0, + "content": "[137] J. Kim, M. Song, S. H. Na, and S. Shin, \"Obliviate: Neutralizing task-agnostic backdoors within the parameter-efficient fine-tuning paradigm,\" arXiv preprint arXiv:2409.14119, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.259, + 0.492, + 0.316 + ], + "angle": 0, + "content": "[138] S. Jiang, S. R. Kadhe, Y. Zhou, F. Ahmed, L. Cai, and N. Baracaldo, \"Turning generative models degenerate: The power of data poisoning attacks,\" arXiv preprint arXiv:2407.12281, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.317, + 0.492, + 0.375 + ], + "angle": 0, + "content": "[139] T. Li, A. K. Sahu, A. Talwalkar, and V. Smith, \"Federated learning: Challenges, methods, and future directions,\" IEEE signal processing magazine, vol. 37, no. 3, pp. 50-60, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.376, + 0.492, + 0.418 + ], + "angle": 0, + "content": "[140] C. Zhang, Y. Xie, H. Bai, B. Yu, W. Li, and Y. Gao, \"A survey on federated learning,\" Knowledge-Based Systems, vol. 216, p. 106775, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.419, + 0.492, + 0.462 + ], + "angle": 0, + "content": "[141] L. Li, Y. Fan, M. Tse, and K.-Y. Lin, \"A review of applications in federated learning,\" Computers & Industrial Engineering, vol. 149, p. 106854, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.462, + 0.492, + 0.521 + ], + "angle": 0, + "content": "[142] Z. Wang, Z. Shen, Y. He, G. Sun, H. Wang, L. Lyu, and A. 
Li, \"Flora: Federated fine-tuning large language models with heterogeneous low-rank adaptations,\" arXiv preprint arXiv:2409.05976, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.521, + 0.492, + 0.564 + ], + "angle": 0, + "content": "[143] C. Chen, X. Feng, Y. Li, L. Lyu, J. Zhou, X. Zheng, and J. Yin, \"Integration of large language models and federated learning,\" *Patterns*, vol. 5, no. 12, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.565, + 0.492, + 0.622 + ], + "angle": 0, + "content": "[144] W. Zhuang, C. Chen, and L. Lyu, \"When foundation model meets federated learning: Motivations, challenges, and future directions,\" arXiv preprint arXiv:2306.15546, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.622, + 0.492, + 0.681 + ], + "angle": 0, + "content": "[145] G. Sun, Y. Cong, J. Dong, Q. Wang, L. Lyu, and J. Liu, \"Data poisoning attacks on federated machine learning,\" IEEE Internet of Things Journal, vol. 9, no. 13, pp. 11365-11375, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.681, + 0.492, + 0.753 + ], + "angle": 0, + "content": "[146] L. Lyu, H. Yu, X. Ma, C. Chen, L. Sun, J. Zhao, Q. Yang, and P. S. Yu, \"Privacy and robustness in federated learning: Attacks and defenses,\" IEEE transactions on neural networks and learning systems, vol. 35, no. 7, pp. 8726-8746, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.754, + 0.492, + 0.812 + ], + "angle": 0, + "content": "[147] R. Ye, J. Chai, X. Liu, Y. Yang, Y. Wang, and S. Chen, \"Emerging safety attack and defense in federated instruction tuning of large language models,\" arXiv preprint arXiv:2406.10630, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.812, + 0.492, + 0.885 + ], + "angle": 0, + "content": "[148] Z. Zhang, A. Panda, L. Song, Y. Yang, M. Mahoney, P. Mittal, R. Kannan, and J. Gonzalez, \"Neurotoxin: Durable backdoors in federated learning,\" in International Conference on Machine Learning. 
PMLR, 2022, pp. 26429-26446." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.885, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[149] T. Fu, M. Sharma, P. Torr, S. B. Cohen, D. Krueger, and F. Berez, “Poisonbench: Assessing large language model vulnerability to data poisoning,” arXiv preprint arXiv:2410.08811, 2024." + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.054, + 0.922, + 0.111 + ], + "angle": 0, + "content": "[150] P. Pathmanathan, S. Chakraborty, X. Liu, Y. Liang, and F. Huang, \"Is poisoning a real threat to llm alignment? maybe more so than you think,\" arXiv preprint arXiv:2406.12091, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.112, + 0.922, + 0.17 + ], + "angle": 0, + "content": "[151] A. Wan, E. Wallace, S. Shen, and D. Klein, “Poisoning language models during instruction tuning,” in International Conference on Machine Learning. PMLR, 2023, pp. 35413-35425." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.17, + 0.922, + 0.213 + ], + "angle": 0, + "content": "[152] J. Rando and F. Tramer, \"Universal jailbreak backdoors from poisoned human feedback,\" arXiv preprint arXiv:2311.14455, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.215, + 0.922, + 0.258 + ], + "angle": 0, + "content": "[153] T. Baumgartner, Y. Gao, D. Alon, and D. Metzler, \"Best-of-venom: Attacking rlhf by injecting poisoned preference data,\" arXiv preprint arXiv:2404.05530, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.259, + 0.922, + 0.316 + ], + "angle": 0, + "content": "[154] B. Chen, H. Guo, G. Wang, Y. Wang, and Q. Yan, \"The dark side of human feedback: Poisoning large language models via user inputs,\" arXiv preprint arXiv:2409.00787, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.317, + 0.922, + 0.389 + ], + "angle": 0, + "content": "[155] Y. Bai, A. Jones, K. Ndousse, A. 
Askell, A. Chen, N. DasSarma, D. Drain, S. Fort, D. Ganguli, T. Henighan et al., \"Training a helpful and harmless assistant with reinforcement learning from human feedback,\" arXiv preprint arXiv:2204.05862, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.39, + 0.922, + 0.447 + ], + "angle": 0, + "content": "[156] H. Dong, W. Xiong, B. Pang, H. Wang, H. Zhao, Y. Zhou, N. Jiang, D. Sahoo, C. Xiong, and T. Zhang, \"Rlhf workflow: From reward modeling to online rlhf,\" arXiv preprint arXiv:2405.07863, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.448, + 0.922, + 0.521 + ], + "angle": 0, + "content": "[157] W. Xiong, H. Dong, C. Ye, Z. Wang, H. Zhong, H. Ji, N. Jiang, and T. Zhang, \"Iterative preference learning from human feedback: Bridging theory and practice for rlhf under kl-constraint,\" arXiv preprint arXiv:2312.11456, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.521, + 0.922, + 0.579 + ], + "angle": 0, + "content": "[158] H. Lee, S. Phatale, H. Mansoor, K. R. Lu, T. Mesnard, J. Ferret, C. Bishop, E. Hall, V. Carbune, and A. Rastogi, \"Rlaif: Scaling reinforcement learning from human feedback with ai feedback,\" 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.579, + 0.922, + 0.652 + ], + "angle": 0, + "content": "[159] R. Rafailov, A. Sharma, E. Mitchell, C. D. Manning, S. Ermon, and C. Finn, \"Direct preference optimization: Your language model is secretly a reward model,\" Advances in Neural Information Processing Systems, vol. 36, pp. 53728-53741, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.652, + 0.922, + 0.71 + ], + "angle": 0, + "content": "[160] J. Wang, J. Wu, M. Chen, Y. Vorobeychik, and C. Xiao, \"Rlhfpoison: Reward poisoning attack for reinforcement learning with human feedback in large language models,\" arXiv preprint arXiv:2311.09641, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.71, + 0.922, + 0.768 + ], + "angle": 0, + "content": "[161] S. 
Gunasekar, Y. Zhang, J. Aneja, C. C. T. Mendes, A. Del Giorno, S. Gopi, M. Javaheripi, P. Kauffmann, G. de Rosa, O. Saarikivi et al., \"Textbooks are all you need,\" arXiv preprint arXiv:2306.11644, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.768, + 0.922, + 0.825 + ], + "angle": 0, + "content": "[162] Y. Li, S. Bubeck, R. Eldan, A. Del Giorno, S. Gunasekar, and Y. T. Lee, \"Textbooks are all you need ii: phi-1.5 technical report,\" arXiv preprint arXiv:2309.05463, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.826, + 0.922, + 0.885 + ], + "angle": 0, + "content": "[163] J. Zhan, J. Dai, J. Ye, Y. Zhou, D. Zhang, Z. Liu, X. Zhang, R. Yuan, G. Zhang, L. Li et al., \"Anygpt: Unified multimodal llm with discrete sequence modeling,\" arXiv preprint arXiv:2402.12226, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.885, + 0.922, + 0.943 + ], + "angle": 0, + "content": "[164] H. Wang, C. Liu, N. Xi, Z. Qiang, S. Zhao, B. Qin, and T. Liu, \"Huatuo: Tuning llama model with chinese medical knowledge,\" arXiv preprint arXiv:2304.06975, 2023." + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.922, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "42" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.111 + ], + "angle": 0, + "content": "[165] P. Sutanto, J. Santoso, E. I. Setiawan, and A. P. Wibawa, \"Llm distillation for efficient few-shot multiple choice question answering,\" arXiv preprint arXiv:2412.09807, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.112, + 0.492, + 0.169 + ], + "angle": 0, + "content": "[166] X. Zhu, J. Li, Y. Liu, C. Ma, and W. 
Wang, \"Distilling mathematical reasoning capabilities into small language models,\" Neural Networks, vol. 179, p. 106594, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.17, + 0.492, + 0.243 + ], + "angle": 0, + "content": "[167] R. Xu, H. Cui, Y. Yu, X. Kan, W. Shi, Y. Zhuang, W. Jin, J. Ho, and C. Yang, \"Knowledge-infused prompting: Assessing and advancing clinical text data generation with large language models,\" arXiv preprint arXiv:2311.00287, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.243, + 0.492, + 0.301 + ], + "angle": 0, + "content": "[168] N. Crispino, K. Montgomery, F. Zeng, D. Song, and C. Wang, \"Agent instructs large language models to be general zero-shot reasoners,\" arXiv preprint arXiv:2310.03710, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.302, + 0.492, + 0.36 + ], + "angle": 0, + "content": "[169] C. Li, C. Zhang, Y. Lu, J. Zhang, Q. Sun, X. Wang, J. Wei, G. Wang, Y. Yang, and H. T. Shen, \"Syzygy of thoughts: Improving llm cot with the minimal free resolution,\" arXiv preprint arXiv:2504.09566, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.36, + 0.492, + 0.418 + ], + "angle": 0, + "content": "[170] Z. Chen, K. Liu, Q. Wang, W. Zhang, J. Liu, D. Lin, K. Chen, and F. Zhao, \"Agent-flan: Designing data and methods of effective agent tuning for large language models,\" arXiv preprint arXiv:2403.12881, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.418, + 0.492, + 0.476 + ], + "angle": 0, + "content": "[171] C. Xu, Q. Sun, K. Zheng, X. Geng, P. Zhao, J. Feng, C. Tao, and D. Jiang, \"Wizardlm: Empowering large language models to follow complex instructions,\" arXiv preprint arXiv:2304.12244, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.477, + 0.492, + 0.535 + ], + "angle": 0, + "content": "[172] S. Mukherjee, A. Mitra, G. Jawahar, S. Agarwal, H. Palangi, and A. 
Awadallah, \"Orca: Progressive learning from complex explanation traces of gpt-4,\" arXiv preprint arXiv:2306.02707, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.535, + 0.492, + 0.593 + ], + "angle": 0, + "content": "[173] Y. Wang, Y. Kordi, S. Mishra, A. Liu, N. A. Smith, D. Khashabi, and H. Hajishirzi, \"Self-instruct: Aligning language models with self-generated instructions,\" arXiv preprint arXiv:2212.10560, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.593, + 0.492, + 0.651 + ], + "angle": 0, + "content": "[174] R. Ri, S. Kiyono, and S. Takase, \"Self-translatabrain: Enhancing cross-lingual transfer of large language models via inherent capability,\" arXiv preprint arXiv:2407.00454, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.652, + 0.492, + 0.724 + ], + "angle": 0, + "content": "[175] J. Ji, M. Liu, J. Dai, X. Pan, C. Zhang, C. Bian, B. Chen, R. Sun, Y. Wang, and Y. Yang, \"Beavertails: Towards improved safety alignment of llm via a human-preference dataset,\" Advances in Neural Information Processing Systems, vol. 36, pp. 24678-24704, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.724, + 0.492, + 0.783 + ], + "angle": 0, + "content": "[176] H. Lightman, V. Kosaraju, Y. Burda, H. Edwards, B. Baker, T. Lee, J. Leike, J. Schulman, I. Sutskever, and K. Cobbe, \"Let's verify step by step,\" in The Twelfth International Conference on Learning Representations, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.783, + 0.492, + 0.855 + ], + "angle": 0, + "content": "[177] R. Nakano, J. Hilton, S. Balaji, J. Wu, L. Ouyang, C. Kim, C. Hesse, S. Jain, V. Kosaraju, W. Saunders et al., \"Webgpt: Browser-assisted question-answering with human feedback,\" arXiv preprint arXiv:2112.09332, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.855, + 0.492, + 0.898 + ], + "angle": 0, + "content": "[178] C. Chen, J. Fu, and L. 
Lyu, \"A pathway towards responsible ai generated content,\" arXiv preprint arXiv:2303.01325, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.899, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[179] A. Akkus, M. P. Aghdam, M. Li, J. Chu, M. Backes, Y. Zhang, and S. Sav, \"Generated data with fake privacy: Hidden dangers of fine-tuning large lan" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.923, + 0.083 + ], + "angle": 0, + "content": "guage models on generated data,\" arXiv preprint arXiv:2409.11423, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.083, + 0.923, + 0.155 + ], + "angle": 0, + "content": "[180] Y. Song, J. Zhang, Z. Tian, Y. Yang, M. Huang, and D. Li, \"Llm-based privacy data augmentation guided by knowledge distillation with a distribution tutor for medical text classification,\" arXiv preprint arXiv:2402.16515, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.155, + 0.923, + 0.213 + ], + "angle": 0, + "content": "[181] A. Kang, J. Y. Chen, Z. Lee-Youngzie, and S. Fu, \"Synthetic data generation with llm for improved depression prediction,\" arXiv preprint arXiv:2411.17672, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.213, + 0.923, + 0.258 + ], + "angle": 0, + "content": "[182] A. Taubenfeld, Y. Dover, R. Reichart, and A. Goldstein, \"Systematic biases in llm simulations of debates,\" arXiv preprint arXiv:2402.04049, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.259, + 0.923, + 0.317 + ], + "angle": 0, + "content": "[183] A. Mishra, G. Nayak, S. Bhattacharya, T. Kumar, A. Shah, and M. Foltin, \"Llm-guided counterfactual data generation for fairer ai,\" in Companion Proceedings of the ACM Web Conference 2024, 2024, pp. 1538-1545." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.317, + 0.923, + 0.389 + ], + "angle": 0, + "content": "[184] Y. Yu, Y. Zhuang, J. Zhang, Y. Meng, A. J. Ratner, R. Krishna, J. Shen, and C. Zhang, \"Large language model as attributed training data generator: A tale of diversity and bias,\" Advances in Neural Information Processing Systems, vol. 36, pp. 55734-55784, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.389, + 0.923, + 0.433 + ], + "angle": 0, + "content": "[185] A. Borah and R. Mihalcea, \"Towards implicit bias detection and mitigation in multi-agent lvm interactions,\" arXiv preprint arXiv:2410.02584, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.433, + 0.923, + 0.476 + ], + "angle": 0, + "content": "[186] X. Dong, Y. Wang, P. S. Yu, and J. Caverlee, \"Disclosure and mitigation of gender bias in llms,\" arXiv preprint arXiv:2402.11190, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.476, + 0.923, + 0.55 + ], + "angle": 0, + "content": "[187] I. M. Serouis and F. Sèdes, “Exploring large language models for bias mitigation and fairness,” in 1st International Workshop on AI Governance (AIGOV) in conjunction with the Thirty-Third International Joint Conference on Artificial Intelligence, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.55, + 0.923, + 0.637 + ], + "angle": 0, + "content": "[188] Y. Chen, Q. Fu, Y. Yuan, Z. Wen, G. Fan, D. Liu, D. Zhang, Z. Li, and Y. Xiao, \"Hallucination detection: Robustly discerning reliable answers in large language models,\" in Proceedings of the 32nd ACM International Conference on Information and Knowledge Management, 2023, pp. 245-255." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.637, + 0.923, + 0.695 + ], + "angle": 0, + "content": "[189] N. Chakraborty, M. Ornik, and K. 
Driggs-Campbell, \"Hallucination detection in foundation models for decision-making: A flexible definition and review of the state of the art,\" ACM Computing Surveys, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.695, + 0.923, + 0.738 + ], + "angle": 0, + "content": "[190] E. Entezami and A. Naseh, \"Llm misalignment via adversarial rlhf platforms,\" arXiv preprint arXiv:2503.03039, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.738, + 0.923, + 0.797 + ], + "angle": 0, + "content": "[191] J. Achiam, S. Adler, S. Agarwal, L. Ahmad, I. Akkaya, F. L. Aleman, D. Almeida, J. Altenschmidt, S. Altman, S. Anadkat et al., \"Gpt-4 technical report,\" arXiv preprint arXiv:2303.08774, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.797, + 0.923, + 0.855 + ], + "angle": 0, + "content": "[192] A. Young, B. Chen, C. Li, C. Huang, G. Zhang, G. Zhang, G. Wang, H. Li, J. Zhu, J. Chen et al., \"Yi: Open foundation models by 01. ai,\" arXiv preprint arXiv:2403.04652, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.855, + 0.923, + 0.914 + ], + "angle": 0, + "content": "[193] A. Dubey, A. Jauhri, A. Pandey, A. Kadian, A. Al-Dahle, A. Letman, A. Mathur, A. Schelten, A. Yang, A. Fan et al., \"The llama 3 herd of models,\" arXiv preprint arXiv:2407.21783, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.914, + 0.923, + 0.943 + ], + "angle": 0, + "content": "[194] Z. Cai, M. Cao, H. Chen, K. Chen, K. Chen, X. Chen, X. Chen, Z. Chen, Z. Chen, P. Chu et al., \"InternlM2" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "43" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.053, + 0.492, + 0.081 + ], + "angle": 0, + "content": "technical report,\" arXiv preprint arXiv:2403.17297, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.083, + 0.492, + 0.141 + ], + "angle": 0, + "content": "[195] R. Anil, A. M. Dai, O. First, M. Johnson, D. Lepikhin, A. Passos, S. Shakeri, E. Taropa, P. Bailey, Z. Chen et al., \"Palm 2 technical report,\" arXiv preprint arXiv:2305.10403, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.142, + 0.492, + 0.2 + ], + "angle": 0, + "content": "[196] T. GLM, A. Zeng, B. Xu, B. Wang, C. Zhang, D. Yin, D. Zhang, D. Rojas, G. Feng, H. Zhao et al., \"Chatglm: A family of large language models from glm-130b to glm-4 all tools,\" arXiv preprint arXiv:2406.12793, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.2, + 0.492, + 0.258 + ], + "angle": 0, + "content": "[197] G. Team, R. Anil, S. Borgeaud, J.-B. Alayrac, J. Yu, R. Soricut, J. Schalkwyk, A. M. Dai, A. Hauth, K. Millican et al., \"Gemini: a family of highly capable multimodal models,\" arXiv preprint arXiv:2312.11805, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.259, + 0.492, + 0.33 + ], + "angle": 0, + "content": "[198] G. Team, T. Mesnard, C. Hardin, R. Dadashi, S. Bhupatiraju, S. Pathak, L. Sifre, M. Rivière, M. S. Kale, J. Love et al., \"Gemma: Open models based on gemini research and technology,\" arXiv preprint arXiv:2403.08295, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.331, + 0.492, + 0.389 + ], + "angle": 0, + "content": "[199] D. Groeneveld, I. Beltagy, P. Walsh, A. Bhagia, R. Kinney, O. Tafjord, A. H. Jha, H. Ivison, I. Magnusson, Y. Wang et al., \"Olmo: Accelerating the science of language models,\" arXiv preprint arXiv:2402.00838, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.39, + 0.492, + 0.447 + ], + "angle": 0, + "content": "[200] B. Adler, N. Agarwal, A. Aithal, D. H. Anh, P. Bhattacharya, A. Brundyn, J. Casper, B. Catanzaro, S. Clay, J. Cohen et al., \"Nemotron-4 340b technical report,\" arXiv preprint arXiv:2406.11704, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.448, + 0.492, + 0.506 + ], + "angle": 0, + "content": "[201] A. Jaech, A. Kalai, A. Lerer, A. Richardson, A. El-Kishky, A. Low, A. Helyar, A. Madry, A. Beutel, A. Carney et al., \"Openai o1 system card,\" arXiv preprint arXiv:2412.16720, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.507, + 0.492, + 0.55 + ], + "angle": 0, + "content": "[202] OpenAI, \"Gpt-4o mini: advancing cost-efficient intelligence,\" 2024, https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.55, + 0.492, + 0.607 + ], + "angle": 0, + "content": "[203] A. Yang, B. Xiao, B. Wang, B. Zhang, C. Bian, C. Yin, C. Lv, D. Pan, D. Wang, D. Yan et al., \"Baichuan 2: Open large-scale language models,\" arXiv preprint arXiv:2309.10305, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.608, + 0.492, + 0.681 + ], + "angle": 0, + "content": "[204] J. Welbl, A. Glaese, J. Uesato, S. Dathathri, J. Mellor, L. A. Hendricks, K. Anderson, P. Kohli, B. Coppin, and P.-S. Huang, \"Challenges in detoxifying language models,\" in Findings of the Association for Computational Linguistics: EMNLP 2021, 2021, pp. 2447-2469." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.681, + 0.492, + 0.738 + ], + "angle": 0, + "content": "[205] H. Ngo, C. Raterink, J. G. Araújo, I. Zhang, C. Chen, A. Morisot, and N. Frosst, \"Mitigating harm in language models with conditional-likelihood filtration,\" arXiv preprint arXiv:2108.07790, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.739, + 0.492, + 0.782 + ], + "angle": 0, + "content": "[206] Y. Chen, W. Cai, L. Wu, X. Li, Z. Xin, and C. Fu, \"Tigerbot: An open multilingual multitask llm,\" arXiv preprint arXiv:2312.08688, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.783, + 0.492, + 0.87 + ], + "angle": 0, + "content": "[207] S. Prabhumoye, M. Patwary, M. Shoeybi, and B. Catanzaro, \"Adding instructions during pretraining: Effective way of controlling toxicity in language models,\" in Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics, 2023, pp. 2636-2651." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.871, + 0.492, + 0.942 + ], + "angle": 0, + "content": "[208] Y. Ge, W. Sun, Y. Lou, C. Fang, Y. Zhang, Y. Li, X. Zhang, Y. Liu, Z. Zhao, and Z. Chen, \"Demonstration attack against in-context learning for code intelligence,\" CoRR, vol. abs/2410.02841, no. 1, pp. 1-17, 2024." + }, + { + "type": "list", + "bbox": [ + 0.076, + 0.053, + 0.492, + 0.942 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.127 + ], + "angle": 0, + "content": "[209] G. Team, P. Georgiev, V. I. Lei, R. Burnell, L. Bai, A. Gulati, G. Tanzer, D. Vincent, Z. Pan, S. Wang et al., \"Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context,\" arXiv preprint arXiv:2403.05530, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.128, + 0.923, + 0.198 + ], + "angle": 0, + "content": "[210] J. Parmar, S. Prabhumoye, J. Jennings, M. Patwary, S. Subramanian, D. Su, C. Zhu, D. Narayanan, A. Jhunjunwala, A. Dattagupta et al., \"Nemotron-4 15b technical report,\" arXiv preprint arXiv:2402.16819, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.2, + 0.923, + 0.272 + ], + "angle": 0, + "content": "[211] C. Raffel, N. Shazeer, A. Roberts, K. Lee, S. Narang, M. Matena, Y. Zhou, W. Li, and P. 
J. Liu, \"Exploring the limits of transfer learning with a unified text-to-text transformer,\" Journal of machine learning research, vol. 21, no. 140, pp. 1-67, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.273, + 0.923, + 0.346 + ], + "angle": 0, + "content": "[212] T. Markov, C. Zhang, S. Agarwal, F. E. Nekoul, T. Lee, S. Adler, A. Jiang, and L. Weng, “A holistic approach to undesired content detection in the real world,” in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 37, no. 12, 2023, pp. 15009-15018." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.346, + 0.923, + 0.403 + ], + "angle": 0, + "content": "[213] A. Dubey, A. Jauhri, A. Pandey, A. Kadian, A. Al-Dahle, A. Letman, A. Mathur, A. Schelten, A. Yang, A. Fan et al., \"The llama 3 herd of models,\" arXiv preprint arXiv:2407.21783, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.403, + 0.923, + 0.462 + ], + "angle": 0, + "content": "[214] T. Huang, S. Hu, F. Ilhan, S. F. Tekin, and L. Liu, \"Harmful fine-tuning attacks and defenses for large language models: A survey,\" arXiv preprint arXiv:2409.18169, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.462, + 0.923, + 0.535 + ], + "angle": 0, + "content": "[215] J. Wu, Y. Xie, Z. Yang, J. Wu, J. Chen, J. Gao, B. Ding, X. Wang, and X. He, \"Towards robust alignment of language models: Distributionally robustifying direct preference optimization,\" arXiv preprint arXiv:2407.07880, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.535, + 0.923, + 0.592 + ], + "angle": 0, + "content": "[216] Z. Xu, S. Vemuri, K. Panaganti, D. Kalathil, R. Jain, and D. Ramachandran, \"Distributionally robust direct preference optimization,\" arXiv preprint arXiv:2502.01930, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.593, + 0.923, + 0.651 + ], + "angle": 0, + "content": "[217] J. Dai, X. Pan, R. Sun, J. Ji, X. Xu, M. Liu, Y. Wang, and Y. 
Yang, \"Safe rlhf: Safe reinforcement learning from human feedback,\" in The Twelfth International Conference on Learning Representations, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.652, + 0.923, + 0.739 + ], + "angle": 0, + "content": "[218] C. O. Retzlaff, S. Das, C. Wayllace, P. Mousavi, M. Afshari, T. Yang, A. Saranti, A. Angerschmid, M. E. Taylor, and A. Holzinger, \"Human-in-the-loop reinforcement learning: A survey and position on requirements, challenges, and opportunities,\" Journal of Artificial Intelligence Research, vol. 79, pp. 359-415, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.739, + 0.923, + 0.797 + ], + "angle": 0, + "content": "[219] S. Milani, N. Topin, M. Veloso, and F. Fang, \"Explainable reinforcement learning: A survey and comparative review,\" ACM Computing Surveys, vol. 56, no. 7, pp. 1-36, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.798, + 0.923, + 0.87 + ], + "angle": 0, + "content": "[220] A. Ahmadian, C. Cremer, M. Galle, M. Fadaee, J. Kreutzer, O. Pietquin, A. Üstün, and S. Hooker, \"Back to basics: Revisiting reinforce style optimization for learning from human feedback in llms,\" arXiv preprint arXiv:2402.14740, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.871, + 0.923, + 0.928 + ], + "angle": 0, + "content": "[221] T. Liu, Z. Qin, J. Wu, J. Shen, M. Khalman, R. Joshi, Y. Zhao, M. Saleh, S. Baumgartner, J. Liu et al., \"Lipo: Listwise preference optimization through learning-torank,\" arXiv preprint arXiv:2402.01878, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.929, + 0.923, + 0.944 + ], + "angle": 0, + "content": "[222] F. Song, B. Yu, M. Li, H. Yu, F. Huang, Y. Li, and" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "44" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.492, + 0.111 + ], + "angle": 0, + "content": "H. Wang, \"Preference ranking optimization for human alignment,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 17, 2024, pp. 18990-18998." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.112, + 0.492, + 0.184 + ], + "angle": 0, + "content": "[223] Z. Wang, B. Bi, S. K. Pentyala, K. Ramnath, S. Chaudhuri, S. Mehrotra, X.-B. Mao, S. Asur et al., \"A comprehensive survey of llm alignment techniques: Rlhf, rlaif, ppo, dpo and more,\" arXiv preprint arXiv:2407.16216, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.185, + 0.492, + 0.272 + ], + "angle": 0, + "content": "[224] T. Huang, S. Hu, F. Ilhan, S. F. Tekin, and L. Liu, \"Lisa: Lazy safety alignment for large language models against harmful fine-tuning attack,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=RPChapuXIC" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.273, + 0.492, + 0.359 + ], + "angle": 0, + "content": "[225] T. Huang, S. Hu, and L. Liu, \"Vaccine: Perturbation-aware alignment for large language models against harmful fine-tuning attack,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=lpXDZKiAnt" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.36, + 0.492, + 0.461 + ], + "angle": 0, + "content": "[226] J. Wang, J. Li, Y. Li, X. Qi, J. Hu, Y. Li, P. McDaniel, M. Chen, B. Li, and C. Xiao, \"Backdooralign: Mitigating fine-tuning based jailbreak attack with backdoor enhanced safety alignment,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. 
Available: https://openreview.net/forum?id=1PcjJ5Evta7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.462, + 0.492, + 0.564 + ], + "angle": 0, + "content": "[227] F. Bianchi, M. Suzgun, G. Attanasio, P. Rottger, D. Jurafsky, T. Hashimoto, and J. Zou, \"Safety-tuned LLaMAs: Lessons from improving the safety of large language models that follow instructions,\" in The Twelfth International Conference on Learning Representations, 2024. [Online]. Available: https://openreview.net/forum?id=gT5hALch9z" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.565, + 0.492, + 0.651 + ], + "angle": 0, + "content": "[228] H. Shen, P.-Y. Chen, P. Das, and T. Chen, \"SEAL: Safety-enhanced aligned LLM fine-tuning via bilevel data selection,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=VHguhvcoM5" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.652, + 0.492, + 0.695 + ], + "angle": 0, + "content": "[229] R. Tang, J. Yuan, Y. Li, Z. Liu, R. Chen, and X. Hu, \"Setting the trap: Capturing and defeating backdoor threats in plms through honeypots,\" NeurIPS, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.695, + 0.492, + 0.796 + ], + "angle": 0, + "content": "[230] C.-Y. Hsu, Y.-L. Tsai, C.-H. Lin, P.-Y. Chen, C.-M. Yu, and C.-Y. Huang, \"Safe loRA: The silver lining of reducing safety risks when finetuning large language models,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=HcifdQZFV" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.798, + 0.492, + 0.884 + ], + "angle": 0, + "content": "[231] R. Hazra, S. Layek, S. Banerjee, and S. Poria, \"Safety arithmetic: A framework for test-time safety alignment of language models by steering parameters and activations,\" in Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, 2024, pp. 
21759-21776." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.885, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[232] Y. Du, S. Zhao, D. Zhao, M. Ma, Y. Chen, L. Huo, Q. Yang, D. Xu, and B. Qin, \"MoGU: A framework for enhancing safety of LLMs while preserving their usability,\" in The Thirty-" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.922, + 0.098 + ], + "angle": 0, + "content": "eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=SrFbgIjb53" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.098, + 0.922, + 0.156 + ], + "angle": 0, + "content": "[233] X. Yi, S. Zheng, L. Wang, G. de Melo, X. Wang, and L. He, \"Nlsr: Neuron-level safety realignment of large language models against harmful fine-tuning,\" arXiv preprint arXiv:2412.12497, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.157, + 0.922, + 0.213 + ], + "angle": 0, + "content": "[234] D. Shi, T. Shen, Y. Huang, Z. Li, Y. Leng, R. Jin, C. Liu, X. Wu, Z. Guo, L. Yu et al., \"Large language model safety: A holistic survey,\" arXiv preprint arXiv:2412.17686, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.215, + 0.922, + 0.286 + ], + "angle": 0, + "content": "[235] B. Ni, Z. Liu, L. Wang, Y. Lei, Y. Zhao, X. Cheng, Q. Zeng, L. Dong, Y. Xia, K. Kenthapadi et al., \"Towards trustworthy retrieval augmented generation for large language models: A survey,\" arXiv preprint arXiv:2502.06872, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.287, + 0.922, + 0.373 + ], + "angle": 0, + "content": "[236] F. Berez, T. Fu, A. Prabhu, S. Casper, A. Sanyal, A. Bibi, A. O'Gara, R. Kirk, B. Bucknall, T. Fist, L. Ong, P. Torr, K. Lam, R. Trager, D. Krueger, S. Mindermann, J. Hernández-Orallo, M. Geva, and Y. 
Gal, \"Open problems in machine unlearning for AI safety,\" CoRR, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.375, + 0.922, + 0.446 + ], + "angle": 0, + "content": "[237] U. Anwar, A. Saparov, J. Rando, D. Paleka, M. Turpin, P. Hase, E. S. Lubana, E. Jenner, S. Casper, O. Sourbut et al., “Foundational challenges in assuring alignment and safety of large language models,” arXiv preprint arXiv:2404.09932, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.447, + 0.922, + 0.505 + ], + "angle": 0, + "content": "[238] X. Qi, Y. Zeng, T. Xie, P.-Y. Chen, R. Jia, P. Mittal, and P. Henderson, \"Fine-tuning aligned language models compromises safety, even when users do not intend to!\" arXiv preprint arXiv:2310.03693, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.506, + 0.922, + 0.564 + ], + "angle": 0, + "content": "[239] X. Yang, X. Wang, Q. Zhang, L. Petzold, W. Y. Wang, X. Zhao, and D. Lin, \"Shadow alignment: The ease of subverting safely-aligned language models.(2023),\" arXiv preprint arXiv:2310.02949, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.565, + 0.922, + 0.607 + ], + "angle": 0, + "content": "[240] Q. Zhan, R. Fang, R. Bindu, A. Gupta, T. Hashimoto, and D. Kang, \"Removing rlhf protections in gpt-4 via fine-tuning,\" arXiv preprint arXiv:2311.05553, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.608, + 0.922, + 0.666 + ], + "angle": 0, + "content": "[241] J. Kazdan, L. Yu, R. Schaeffer, C. Cundy, S. Koyejo, and D. Krishnamurthy, \"No, of course i can! refusal mechanisms can be exploited using harmless finetuning data,\" arXiv preprint arXiv:2502.19537, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.666, + 0.922, + 0.723 + ], + "angle": 0, + "content": "[242] D. Halawi, A. Wei, E. Wallace, T. T. Wang, N. Haghtalab, and J. Steinhardt, \"Covert malicious finetuning: Challenges in safeguarding llm adaptation,\" arXiv preprint arXiv:2406.20053, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.724, + 0.922, + 0.782 + ], + "angle": 0, + "content": "[243] T. Huang, S. Hu, F. Ilhan, S. F. Tekin, and L. Liu, \"Virus: Harmful fine-tuning attack for large language models bypassing guardrail moderation,\" arXiv preprint arXiv:2501.17433, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.783, + 0.922, + 0.841 + ], + "angle": 0, + "content": "[244] Y. Qiang, X. Zhou, S. Z. Zade, M. A. Roshani, P. Khan-duri, D. Zytko, and D. Zhu, \"Learning to poison large language models during instruction tuning,\" arXiv preprint arXiv:2402.13459, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.842, + 0.922, + 0.884 + ], + "angle": 0, + "content": "[245] J. Raghuram, G. Kesidis, and D. J. Miller, \"A study of backdoors in instruction fine-tuned language models,\" arXiv preprint arXiv:2406.07778, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.885, + 0.922, + 0.943 + ], + "angle": 0, + "content": "[246] J. Yi, R. Ye, Q. Chen, B. Zhu, S. Chen, D. Lian, G. Sun, X. Xie, and F. Wu, \"On the vulnerability of safety alignment in open-access llms,\" in Findings of the Association for Computational Linguistics ACL 2024," + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.922, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "45" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.262, + 0.068 + ], + "angle": 0, + "content": "2024, pp. 9236-9260." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.069, + 0.492, + 0.112 + ], + "angle": 0, + "content": "[247] S. Lermen, C. Rogers-Smith, and J. 
Ladish, \"Lora finetuning efficiently undoes safety training in llama 2-chat 70b,\" arXiv preprint arXiv:2310.20624, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.113, + 0.492, + 0.142 + ], + "angle": 0, + "content": "[248] L. Piercing, \"Lora-as-an-attack! piercing llm safety under the share-and-play scenario.\"" + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.143, + 0.492, + 0.2 + ], + "angle": 0, + "content": "[249] S. Poppi, Z.-X. Yong, Y. He, B. Chern, H. Zhao, A. Yang, and J. Chi, \"Towards understanding the fragility of multilingual llms against fine-tuning attacks,\" arXiv preprint arXiv:2410.18210, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.2, + 0.492, + 0.257 + ], + "angle": 0, + "content": "[250] S. Li, E. C.-H. Ngai, F. Ye, and T. Voigt, \"Peft-as-an-attack! jailbreaking language models during federated parameter-efficient fine-tuning,\" arXiv preprint arXiv:2411.19335, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.259, + 0.492, + 0.316 + ], + "angle": 0, + "content": "[251] N. Razin, S. Malladi, A. Bhaskar, D. Chen, S. Arora, and B. Hanin, \"Unintentional unalignment: Likelihood displacement in direct preference optimization,\" arXiv preprint arXiv:2410.08847, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.317, + 0.492, + 0.375 + ], + "angle": 0, + "content": "[252] R. Xu, Y. Cai, Z. Zhou, R. Gu, H. Weng, Y. Liu, T. Zhang, W. Xu, and H. Qiu, \"Course-correction: Safety alignment using synthetic preferences,\" arXiv preprint arXiv:2407.16637, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.376, + 0.492, + 0.433 + ], + "angle": 0, + "content": "[253] J. Ji, B. Chen, H. Lou, D. Hong, B. Zhang, X. Pan, T. A. Qiu, J. Dai, and Y. Yang, \"Aligner: Efficient alignment by learning to correct,\" Advances in Neural Information Processing Systems, vol. 37, pp. 90853-90890, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.434, + 0.492, + 0.505 + ], + "angle": 0, + "content": "[254] D. Ganguli, L. Lovitt, J. Kernion, A. Askell, Y. Bai, S. Kadavath, B. Mann, E. Perez, N. Schiefer, K. Ndousse et al., \"Red teaming language models to reduce harms: Methods, scaling behaviors, and lessons learned,\" arXiv preprint arXiv:2209.07858, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.507, + 0.492, + 0.593 + ], + "angle": 0, + "content": "[255] T. Xiao, Y. Yuan, H. Zhu, M. Li, and V. G. Honavar, \"Cal-DPO: Calibrated direct preference optimization for language model alignment,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=57OQXxbTbY" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.594, + 0.492, + 0.652 + ], + "angle": 0, + "content": "[256] S. Guo, B. Zhang, T. Liu, T. Liu, M. Khalman, F. Llinares, A. Rame, T. Mesnard, Y. Zhao, B. Piot et al., \"Direct language model alignment from online ai feedback,\" arXiv preprint arXiv:2402.04792, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.652, + 0.492, + 0.695 + ], + "angle": 0, + "content": "[257] Z. Liu, X. Sun, and Z. Zheng, \"Enhancing llm safety via constrained direct preference optimization,\" arXiv preprint arXiv:2403.02475, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.696, + 0.492, + 0.797 + ], + "angle": 0, + "content": "[258] H. Lee, S. Phatale, H. Mansoor, T. Mesnard, J. Ferret, K. R. Lu, C. Bishop, E. Hall, V. Carbune, A. Rastogi, and S. Prakash, \"RLAIF vs. RLHF: Scaling reinforcement learning from human feedback with AI feedback,\" in *Forty-first International Conference on Machine Learning*, 2024. [Online]. Available: https://openreview.net/forum?id=uydQ2W41KO" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.798, + 0.492, + 0.869 + ], + "angle": 0, + "content": "[259] X. Lu, B. Yu, Y. Lu, H. Lin, H. Yu, L. Sun, X. 
Han, and Y. Li, \"Sofa: Shielded on-the-fly alignment via priority rule following,\" in Findings of the Association for Computational Linguistics ACL 2024, 2024, pp. 7108-7136." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.87, + 0.492, + 0.927 + ], + "angle": 0, + "content": "[260] A. Zou, Z. Wang, N. Carlini, M. Nasr, J. Z. Kolter, and M. Fredrikson, \"Universal and transferable adversarial attacks on aligned language models,\" arXiv preprint arXiv:2307.15043, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.928, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[261] P. Chao, A. Robey, E. Dobriban, H. Hassani, G. J." + }, + { + "type": "list", + "bbox": [ + 0.076, + 0.054, + 0.492, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.923, + 0.097 + ], + "angle": 0, + "content": "Pappas, and E. Wong, \"Jailbreaking black box large language models in twenty queries,\" arXiv preprint arXiv:2310.08419, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.098, + 0.923, + 0.155 + ], + "angle": 0, + "content": "[262] Z. Zhou, J. Xiang, H. Chen, Q. Liu, Z. Li, and S. Su, \"Speak out of turn: Safety vulnerability of large language models in multi-turn dialogue,\" arXiv preprint arXiv:2402.17262, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.157, + 0.923, + 0.213 + ], + "angle": 0, + "content": "[263] Q. Ren, H. Li, D. Liu, Z. Xie, X. Lu, Y. Qiao, L. Sha, J. Yan, L. Ma, and J. Shao, \"Derail yourself: Multi-turn llm jailbreak attack through self-discovered clues,\" arXiv preprint arXiv:2410.10700, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.215, + 0.923, + 0.288 + ], + "angle": 0, + "content": "[264] X. Pang, S. Tang, R. Ye, Y. Xiong, B. Zhang, Y. Wang, and S. Chen, \"Self-alignment of large language models via monopolylogue-based social scene simulation,\" in Proceedings of the 41st International Conference on Machine Learning, 2024, pp. 39-46." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.289, + 0.923, + 0.346 + ], + "angle": 0, + "content": "[265] J. Ji, D. Hong, B. Zhang, B. Chen, J. Dai, B. Zheng, T. Qiu, B. Li, and Y. Yang, \"Pku-saferlhf: Towards multi-level safety alignment for llms with human preference,\" arXiv preprint arXiv:2406.15513, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.346, + 0.923, + 0.418 + ], + "angle": 0, + "content": "[266] T. Mu, A. Helyar, J. Heidecke, J. Achiam, A. Vallone, I. D. Kivlichan, M. Lin, A. Beutel, J. Schulman, and L. Weng, \"Rule based rewards for language model safety,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.419, + 0.923, + 0.55 + ], + "angle": 0, + "content": "[267] X. Tan, S. Shi, X. Qiu, C. Qu, Z. Qi, Y. Xu, and Y. Qi, \"Self-criticism: Aligning large language models with their understanding of helpfulness, honesty, and harmlessness,\" in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Industry Track, M. Wang and I. Zitouni, Eds. Singapore: Association for Computational Linguistics, Dec. 2023, pp. 650-662. [Online]. Available: https://aclanthology.org/2023.emnlp-industry.62/" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.551, + 0.923, + 0.608 + ], + "angle": 0, + "content": "[268] M. Y. Guan, M. Joglekar, E. Wallace, S. Jain, B. Barak, A. Heylar, R. Dias, A. Vallone, H. Ren, J. Wei et al., \"Deliberative alignment: Reasoning enables safer language models,\" arXiv preprint arXiv:2412.16339, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.608, + 0.923, + 0.709 + ], + "angle": 0, + "content": "[269] B. Wei, K. Huang, Y. Huang, T. Xie, X. Qi, M. Xia, P. Mittal, M. Wang, and P. Henderson, \"Assessing the brittleness of safety alignment via pruning and low-rank modifications,\" in *Forty-first International Conference on Machine Learning*, 2024. [Online]. 
Available: https://openreview.net/forum?id=K6xxnKN2gm" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.71, + 0.923, + 0.797 + ], + "angle": 0, + "content": "[270] A. Arditi, O. B. Obeso, A. Syed, D. Paleka, N. Rimsky, W. Gurnee, and N. Nanda, \"Refusal in language models is mediated by a single direction,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=pH3XAQME6c" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.798, + 0.923, + 0.885 + ], + "angle": 0, + "content": "[271] R. Ye, J. Chai, X. Liu, Y. Yang, Y. Wang, and S. Chen, \"Emerging safety attack and defense in federated instruction tuning of large language models,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=sYNWqQYJhz" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.886, + 0.923, + 0.943 + ], + "angle": 0, + "content": "[272] J. Mukhoti, Y. Gal, P. Torr, and P. K. Dokania, \"Finetuning can cripple foundation models; preserving features may be the solution,\" 2024. [Online]. Available: https://openreview.net/forum?id=VQ7Q6qdp0P" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "46" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.127 + ], + "angle": 0, + "content": "[273] Y. Du, S. Zhao, J. Cao, M. Ma, D. Zhao, F. FAN, T. Liu, and B. Qin, \"Towards secure tuning: Mitigating security risks arising from benign instruction fine-tuning,\" 2024. [Online]. 
Available: https://openreview.net/forum?id=Egd7Vi1EuA" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.127, + 0.492, + 0.171 + ], + "angle": 0, + "content": "[274] J. Li and J.-E. Kim, \"Safety alignment shouldn't be complicated,\" 2025. [Online]. Available: https://openreview.net/forum?id=9H91juqfgb" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.171, + 0.492, + 0.243 + ], + "angle": 0, + "content": "[275] S. Li, L. Yao, L. Zhang, and Y. Li, \"Safety layers in aligned large language models: The key to LLM security,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=kUH1yPMAn7" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.243, + 0.492, + 0.331 + ], + "angle": 0, + "content": "[276] Z. Zhou, H. Yu, X. Zhang, R. Xu, F. Huang, K. Wang, Y. Liu, J. Fang, and Y. Li, \"On the role of attention heads in large language model safety,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=h0Ak8A5yqw" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.331, + 0.492, + 0.404 + ], + "angle": 0, + "content": "[277] M. Li, W. M. Si, M. Backes, Y. Zhang, and Y. Wang, \"SaloRA: Safety-alignment preserved low-rank adaptation,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=GOoVzE9nSj" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.404, + 0.492, + 0.491 + ], + "angle": 0, + "content": "[278] Y. Zong, O. Bohdal, T. Yu, Y. Yang, and T. Hospedales, \"Safety fine-tuning at (almost) no cost: A baseline for vision large language models,\" in *Forty-first International Conference on Machine Learning*, 2024. [Online]. Available: https://openreview.net/forum?id=bWZKvF0g7G" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.491, + 0.492, + 0.579 + ], + "angle": 0, + "content": "[279] F. Eiras, A. Petrov, P. 
Torr, M. P. Kumar, and A. Bibi, \"Do as i do (safely): Mitigating task-specific fine-tuning risks in large language models,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=IXE5lB6ppV" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.579, + 0.492, + 0.638 + ], + "angle": 0, + "content": "[280] J. Luo, X. Luo, K. Ding, J. Yuan, Z. Xiao, and M. Zhang, \"Robustft: Robust supervised fine-tuning for large language models under noisy response,\" 2024. [Online]. Available: https://arxiv.org/abs/2412.14922" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.638, + 0.492, + 0.724 + ], + "angle": 0, + "content": "[281] K. Lyu, H. Zhao, X. Gu, D. Yu, A. Goyal, and S. Arora, \"Keeping LLMs aligned after finetuning: The crucial role of prompt templates,\" in ICLR 2024 Workshop on Reliable and Responsible Foundation Models, 2024. [Online]. Available: https://openreview.net/forum?id=XlnpQOn95Z" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.724, + 0.492, + 0.797 + ], + "angle": 0, + "content": "[282] P. Hacker, A. Engel, and M. Mauer, \"Regulating chatgpt and other large generative ai models,\" in Proceedings of the 2023 ACM Conference on Fairness, Accountability, and Transparency. Association for Computing Machinery, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.797, + 0.492, + 0.87 + ], + "angle": 0, + "content": "[283] M. Kolla, S. Salunkhe, E. Chandrasekharan, and K. Saha, \"Llm-mod: Can large language models assist content moderation?\" in Extended Abstracts of the CHI Conference on Human Factors in Computing Systems. Association for Computing Machinery, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.87, + 0.492, + 0.942 + ], + "angle": 0, + "content": "[284] D. Kumar, Y. A. AbuHashem, and Z. 
Durmeric, \"Watch your language: Investigating content moderation with large language models,\" Proceedings of the International AAAI Conference on Web and Social Media, 2024." + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.942 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.112 + ], + "angle": 0, + "content": "[285] H. K. Choi, X. Du, and Y. Li, \"Safety-aware finetuning of large language models,\" in Neurips Safe Generative AI Workshop 2024, 2024. [Online]. Available: https://openreview.net/forum?id=SqL94fLSM7" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.112, + 0.923, + 0.17 + ], + "angle": 0, + "content": "[286] H. Ge, Y. Li, Q. Wang, Y. Zhang, and R. Tang, \"When backdoors speak: Understanding llm backdoor attacks through model-generated explanations,\" arXiv preprint arXiv:2411.12701, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.17, + 0.923, + 0.258 + ], + "angle": 0, + "content": "[287] B. Yi, T. Huang, S. Chen, T. Li, Z. Liu, Z. Chu, and Y. Li, \"Probe before you talk: Towards black-box defense against backdoor unalignment for large language models,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=EbxYDBhE3S" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.258, + 0.923, + 0.302 + ], + "angle": 0, + "content": "[288] B. Tran, J. Li, and A. Madry, \"Spectral signatures in backdoor attacks,\" in Advances in Neural Information Processing Systems. Curran Associates, Inc., 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.302, + 0.923, + 0.36 + ], + "angle": 0, + "content": "[289] S. Casper, L. Schulze, O. Patel, and D. Hadfield-Menell, \"Defending against unforeseen failure modes with latent adversarial training,\" 2024. [Online]. 
Available: https://arxiv.org/abs/2403.05030" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.36, + 0.923, + 0.432 + ], + "angle": 0, + "content": "[290] T. Huang, G. Bhattacharya, P. Joshi, J. Kimball, and L. Liu, \"Antidote: Post-fine-tuning safety alignment for large language models against harmful finetuning,\" 2024. [Online]. Available: https://arxiv.org/abs/2408.09600" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.432, + 0.923, + 0.476 + ], + "angle": 0, + "content": "[291] J. Li, \"Detecting instruction fine-tuning attack on language models with influence function,\" arXiv preprint arXiv:2504.09026, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.476, + 0.923, + 0.535 + ], + "angle": 0, + "content": "[292] X. Yi, S. Zheng, L. Wang, X. Wang, and L. He, \"A safety realignment framework via subspace-oriented model fusion for large language models,\" Knowledge-Based Systems, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.535, + 0.923, + 0.593 + ], + "angle": 0, + "content": "[293] M. Zhu, Y. Weng, L. Yang, Y. Wei, N. Zhang, and Y. Zhang, \"Locking down the finetuned LLMs safety,\" 2025. [Online]. Available: https://openreview.net/forum?id=YGoFl5KKFc" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.593, + 0.923, + 0.652 + ], + "angle": 0, + "content": "[294] D. Wu, X. Lu, Y. Zhao, and B. Qin, \"Separate the wheat from the chaff: A post-hoc approach to safety re-alignment for fine-tuned language models,\" 2025. [Online]. Available: https://arxiv.org/abs/2412.11041" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.652, + 0.923, + 0.724 + ], + "angle": 0, + "content": "[295] Y. Wang, T. Huang, L. Shen, H. Yao, H. Luo, R. Liu, N. Tan, J. Huang, and D. Tao, \"Panacea: Mitigating harmful fine-tuning for large language models via post-fine-tuning perturbation,\" 2025. [Online]. 
Available: https://arxiv.org/abs/2501.18100" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.724, + 0.923, + 0.811 + ], + "angle": 0, + "content": "[296] Q. Liu, C. Shang, L. Liu, N. Pappas, J. Ma, N. A. John, S. Doss, L. Marquez, M. Ballesteros, and Y. Benajiba, \"Unraveling and mitigating safety alignment degradation of vision-language models,\" 2025. [Online]. Available: https://openreview.net/forum?id=EEWpE9cR27" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.811, + 0.923, + 0.869 + ], + "angle": 0, + "content": "[297] S. Xu, L. Pang, Y. Zhu, H. Shen, and X. Cheng, \"Cross-modal safety mechanism transfer in large vision-language models,\" arXiv preprint arXiv:2410.12662, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.87, + 0.923, + 0.914 + ], + "angle": 0, + "content": "[298] S. Li, L. Yao, L. Zhang, and Y. Li, \"Safety layers in aligned large language models: The key to llm security,\" arXiv preprint arXiv:2408.17003, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.914, + 0.923, + 0.944 + ], + "angle": 0, + "content": "[299] W. Zhao, Z. Li, Y. Li, Y. Zhang, and J. Sun, \"Defending large language models against jailbreak" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "47" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.49, + 0.083 + ], + "angle": 0, + "content": "attacks via layer-specific editing,\" 2024. [Online]. 
Available: https://arxiv.org/abs/2405.18166" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.084, + 0.492, + 0.156 + ], + "angle": 0, + "content": "[300] NIST, \"Artificial intelligence risk management framework: Generative artificial intelligence profile (initial public draft),\" 2024, accessed: 2025-05-29. [Online]. Available: https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.800-1.ipd.pdf" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.157, + 0.492, + 0.213 + ], + "angle": 0, + "content": "[301] X. Qi, B. Wei, N. Carlini, Y. Huang, T. Xie, L. He, M. Jagielski, M. Nasr, P. Mittal, and P. Henderson, \"On Evaluating the Durability of Safeguards for Open-Weight LLMs,\" Dec. 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.215, + 0.492, + 0.301 + ], + "angle": 0, + "content": "[302] D. Rosati, J. Wehner, K. Williams, L. Bartoszcze, R. Gonzales, C. Maple, S. Majumdar, H. Sajjad, and F. Rudzicz, \"Representation Noising: A Defence Mechanism Against Harmful Finetuning,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, Nov. 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.302, + 0.492, + 0.374 + ], + "angle": 0, + "content": "[303] R. Tamirisa, B. Bharathi, L. Phan, A. Zhou, A. Gatti, T. Suresh, M. Lin, J. Wang, R. Wang, R. Arel, A. Zou, D. Song, B. Li, D. Hendrycks, and M. Mazeika, \"Tamper-Resistant Safeguards for Open-Weight LLMs,\" Feb. 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.375, + 0.492, + 0.447 + ], + "angle": 0, + "content": "[304] D. Rosati, J. Wehner, K. Williams, L. Bartoszcze, H. Sajjad, and F. Rudzicz, \"Immunization against harmful fine-tuning attacks,\" in Findings of the Association for Computational Linguistics: EMNLP 2024. Association for Computational Linguistics, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.448, + 0.492, + 0.52 + ], + "angle": 0, + "content": "[305] M. Mazeika, L. Phan, X. Yin, A. Zou, Z. Wang, N. Mu, E. Sakhaee, N. Li, S. 
Basart, B. Li et al., \"Harmbench: A standardized evaluation framework for automated red teaming and robust refusal,\" arXiv preprint arXiv:2402.04249, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.521, + 0.492, + 0.607 + ], + "angle": 0, + "content": "[306] P. Chao, E. Debenedetti, A. Robey, M. Andriushchenko, F. Croce, V. Sehwag, E. Dobriban, N. Flammarion, G. J. Pappas, F. Tramer et al., \"Jailbreakbench: An open robustness benchmark for jailbreaking large language models,\" arXiv preprint arXiv:2404.01318, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.608, + 0.492, + 0.664 + ], + "angle": 0, + "content": "[307] S. Liu, S. Cui, H. Bu, Y. Shang, and X. Zhang, \"Jail-bench: A comprehensive chinese security assessment benchmark for large language models,\" arXiv preprint arXiv:2502.18935, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.666, + 0.492, + 0.709 + ], + "angle": 0, + "content": "[308] J. Cui, W.-L. Chiang, I. Stoica, and C.-J. Hsieh, \"Or-bench: An over-refusal benchmark for large language models,\" arXiv preprint arXiv:2405.20947, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.71, + 0.492, + 0.782 + ], + "angle": 0, + "content": "[309] T. Xie, X. Qi, Y. Zeng, Y. Huang, U. M. Sehwag, K. Huang, L. He, B. Wei, D. Li, Y. Sheng et al., \"Sorry-bench: Systematically evaluating large language model safety refusal behaviors,\" arXiv preprint arXiv:2406.14598, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.783, + 0.492, + 0.855 + ], + "angle": 0, + "content": "[310] L. Zheng, W.-L. Chiang, Y. Sheng, S. Zhuang, Z. Wu, Y. Zhuang, Z. Lin, Z. Li, D. Li, E. Xing et al., \"Judging llm-as-a-judge with mt-bench and chatbot arena,\" Advances in Neural Information Processing Systems, vol. 36, pp. 46595-46623, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.856, + 0.492, + 0.914 + ], + "angle": 0, + "content": "[311] Z. Wang, S. Hu, S. Zhao, X. Lin, F. Juefei-Xu, Z. Li, L. 
Han, H. Subramanyam, L. Chen, J. Chen et al., \"Mllm-as-a-judge for image safety without human labeling,\" arXiv preprint arXiv:2501.00192, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.915, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[312] D. Rosati, J. Wehner, K. Williams, L. Bartoszcze, D. Atanasov, R. Gonzales, S. Majumdar, C. Maple," + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.923, + 0.098 + ], + "angle": 0, + "content": "H. Sajjad, and F. Rudzicz, \"Representation noising effectively prevents harmful fine-tuning on llms,\" arXiv e-prints, pp. arXiv-2405, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.098, + 0.923, + 0.199 + ], + "angle": 0, + "content": "[313] H. Zhang, J. Huang, K. Mei, Y. Yao, Z. Wang, C. Zhan, H. Wang, and Y. Zhang, \"Agent security bench (ASB): Formalizing and benchmarking attacks and defenses in LLM-based agents,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=V4y0CpX4hK" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.2, + 0.923, + 0.257 + ], + "angle": 0, + "content": "[314] T. Yuan, Z. He, L. Dong, Y. Wang, R. Zhao, T. Xia, L. Xu, B. Zhou, F. Li, Z. Zhang et al., \"R-judge: Benchmarking safety risk awareness for llm agents,\" arXiv preprint arXiv:2401.10019, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.259, + 0.923, + 0.316 + ], + "angle": 0, + "content": "[315] Z. Zhang, L. Lei, L. Wu, R. Sun, Y. Huang, C. Long, X. Liu, X. Lei, J. Tang, and M. Huang, \"Safetybench: Evaluating the safety of large language models,\" arXiv preprint arXiv:2309.07045, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.317, + 0.923, + 0.374 + ], + "angle": 0, + "content": "[316] L. Li, B. Dong, R. Wang, X. Hu, W. Zuo, D. Lin, Y. Qiao, and J. 
Shao, \"Salad-bench: A hierarchical and comprehensive safety benchmark for large language models,\" arXiv preprint arXiv:2402.05044, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.375, + 0.923, + 0.432 + ], + "angle": 0, + "content": "[317] K. Cobbe, V. Kosaraju, M. Bavarian, M. Chen, H. Jun, L. Kaiser, M. Plappert, J. Tworek, J. Hilton, R. Nakano et al., \"Training verifiers to solve math word problems,\" arXiv preprint arXiv:2110.14168, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.433, + 0.923, + 0.49 + ], + "angle": 0, + "content": "[318] S.-Y. Miao, C.-C. Liang, and K.-Y. Su, \"A diverse corpus for evaluating and developing english math word problem solvers,\" arXiv preprint arXiv:2106.15772, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.491, + 0.923, + 0.564 + ], + "angle": 0, + "content": "[319] E. Glazer, E. Erdil, T. Besiroglu, D. Chicharro, E. Chen, A. Gunning, C. F. Olsson, J.-S. Denain, A. Ho, E. d. O. Santos et al., \"Frontiermath: A benchmark for evaluating advanced mathematical reasoning in ai,\" arXiv preprint arXiv:2411.04872, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.565, + 0.923, + 0.622 + ], + "angle": 0, + "content": "[320] M. Chen, J. Tworek, H. Jun, Q. Yuan, H. P. D. O. Pinto, J. Kaplan, H. Edwards, Y. Burda, N. Joseph, G. Brockman et al., \"Evaluating large language models trained on code,\" arXiv preprint arXiv:2107.03374, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.623, + 0.923, + 0.68 + ], + "angle": 0, + "content": "[321] C. E. Jimenez, J. Yang, A. Wettig, S. Yao, K. Pei, O. Press, and K. Narasimhan, \"Swe-bench: Can language models resolve real-world github issues?\" arXiv preprint arXiv:2310.06770, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.681, + 0.923, + 0.724 + ], + "angle": 0, + "content": "[322] X. Zhang, J. Zhao, and Y. 
LeCun, \"Character-level convolutional networks for text classification,\" Advances in neural information processing systems, vol. 28, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.725, + 0.923, + 0.781 + ], + "angle": 0, + "content": "[323] H. Luo, Y. Jin, X. Liu, T. Shang, R. Chen, and Z. Liu, \"Geic: Universal and multilingual named entity recognition with large language models,\" arXiv preprint arXiv:2409.11022, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.782, + 0.923, + 0.84 + ], + "angle": 0, + "content": "[324] X. Li, T. Zhang, Y. Dubois, R. Taori, I. Gulrajani, C. Guestrin, P. Liang, and T. B. Hashimoto, \"Alpaca-eval: An automatic evaluator of instruction-following models,\" 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.841, + 0.923, + 0.914 + ], + "angle": 0, + "content": "[325] W.-L. Chiang, L. Zheng, Y. Sheng, A. N. Angelopoulos, T. Li, D. Li, B. Zhu, H. Zhang, M. Jordan, J. E. Gonzalez et al., \"Chatbot arena: An open platform for evaluating llms by human preference,\" in *Forty-first International Conference on Machine Learning*, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.915, + 0.923, + 0.944 + ], + "angle": 0, + "content": "[326] B. Gliwa, I. Mochol, M. Biesek, and A. Wawer, \"Samsum corpus: A human-annotated dialogue" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "48" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.492, + 0.083 + ], + "angle": 0, + "content": "dataset for abstractive summarization,\" arXiv preprint arXiv:1911.12237, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.084, + 0.492, + 0.127 + ], + "angle": 0, + "content": "[327] M. Macháček and O. Bojar, \"Results of the wmt14 metrics shared task,\" in Proceedings of the Ninth Workshop on Statistical Machine Translation, 2014, pp. 293-301." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.128, + 0.492, + 0.185 + ], + "angle": 0, + "content": "[328] X. Lu, D. Liu, Y. Yu, L. Xu, and J. Shao, \"X-boundary: Establishing exact safety boundary to shield llms from multi-turn jailbreaks without compromising usability,\" arXiv preprint arXiv:2502.09990, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.186, + 0.492, + 0.214 + ], + "angle": 0, + "content": "[329] OpenAI, \"Moderation api,\" https://platform.openai.com/docs/guides/moderation/overview, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.215, + 0.492, + 0.272 + ], + "angle": 0, + "content": "[330] H. Inan, K. Upasani, J. Chi, R. Rungta, K. Iyer, Y. Mao, M. Tontchev, Q. Hu, B. Fuller, D. Testuggine, and M. Khabsa, \"Llama guard: Llm-based input-output safeguard for human-ai conversations,\" CoRR, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.273, + 0.492, + 0.33 + ], + "angle": 0, + "content": "[331] J. Ji, T. Qiu, B. Chen, B. Zhang, H. Lou, K. Wang, Y. Duan, Z. He, J. Zhou, Z. Zhang et al., \"Ai alignment: A comprehensive survey,\" arXiv preprint arXiv:2310.19852, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.331, + 0.492, + 0.389 + ], + "angle": 0, + "content": "[332] T. A. Qiu, Y. Zhang, X. Huang, J. Li, J. Ji, and Y. Yang, \"Progressgym: Alignment with a millennium of moral progress,\" Advances in Neural Information Processing Systems, vol. 37, pp. 14570-14607, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.39, + 0.492, + 0.447 + ], + "angle": 0, + "content": "[333] B. Wang, W. Chen, H. Pei, C. Xie, M. Kang, C. Zhang, C. Xu, Z. Xiong, R. Dutta, R. 
Schaeffer et al., \"Decoding trust: A comprehensive assessment of trustworthiness in gpt models.\" in NeurIPS, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.448, + 0.492, + 0.505 + ], + "angle": 0, + "content": "[334] S. Gehman, S. Gururangan, M. Sap, Y. Choi, and N. A. Smith, \"Realtoxicityprompts: Evaluating neural toxic degeneration in language models,\" arXiv preprint arXiv:2009.11462, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.507, + 0.492, + 0.55 + ], + "angle": 0, + "content": "[335] Y. Wang, H. Li, X. Han, P. Nakov, and T. Baldwin, \"Do-not-answer: A dataset for evaluating safeguards in llms,\" arXiv preprint arXiv:2308.13387, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.551, + 0.492, + 0.622 + ], + "angle": 0, + "content": "[336] M. Conover, R. Staats, A. Rane, G. Shani, K. Katz, A. Powell, A. Ross, A. Maas, and A. Zhang, \"Databricks-dolly: Introducing dolly-15k, democratizing the magic of instruction following,\" https://github.com/databrickslabs/dolly, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.623, + 0.492, + 0.693 + ], + "angle": 0, + "content": "[337] X. Wu, Y. Hao, K. Sun, Y. Chen, F. Zhu, R. Zhao, and H. Li, \"Human preference score v2: A solid benchmark for evaluating human preferences of text-to-image synthesis,\" arXiv preprint arXiv:2306.09341, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.695, + 0.492, + 0.767 + ], + "angle": 0, + "content": "[338] Y. Yan, S. Wang, J. Huo, H. Li, B. Li, J. Su, X. Gao, Y.-F. Zhang, T. Xu, Z. Chu et al., \"Errorradar: Benchmarking complex mathematical reasoning of multimodal large language models via error detection,\" arXiv preprint arXiv:2410.04509, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.768, + 0.492, + 0.812 + ], + "angle": 0, + "content": "[339] Q. Jin, B. Dhingra, Z. Liu, W. W. Cohen, and X. 
Lu, \"Pubmedqa: A dataset for biomedical research question answering,\" arXiv preprint arXiv:1909.06146, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.813, + 0.492, + 0.87 + ], + "angle": 0, + "content": "[340] K. M. Hermann, T. Kocisky, E. Grefenstette, L. Espeholt, W. Kay, M. Suleyman, and P. Blunsom, \"Teaching machines to read and comprehend,\" Advances in neural information processing systems, vol. 28, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.871, + 0.492, + 0.913 + ], + "angle": 0, + "content": "[341] S. Lin, J. Hilton, and O. Evans, \"Truthfulqa: Measuring how models mimic human falsehoods,\" arXiv preprint arXiv:2109.07958, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.914, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[342] Y. Mou, S. Zhang, and W. Ye, \"Sg-bench: Evaluating llm safety generalization across diverse tasks and" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.923, + 0.083 + ], + "angle": 0, + "content": "prompt types,\" Advances in Neural Information Processing Systems, vol. 37, pp. 123032-123054, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.084, + 0.923, + 0.141 + ], + "angle": 0, + "content": "[343] F. Jiang, Z. Xu, Y. Li, L. Niu, Z. Xiang, B. Li, B. Y. Lin, and R. Poovendran, \"Safechain: Safety of language models with long chain-of-thought reasoning capabilities,\" arXiv preprint arXiv:2502.12025, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.142, + 0.923, + 0.213 + ], + "angle": 0, + "content": "[344] T. Hartvigsen, S. Gabriel, H. Palangi, M. Sap, D. Ray, and E. Kamar, \"Toxigen: A large-scale machine-generated dataset for adversarial and implicit hate speech detection,\" arXiv preprint arXiv:2203.09509, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.215, + 0.923, + 0.272 + ], + "angle": 0, + "content": "[345] A. Souly, Q. Lu, D. Bowen, T. Trinh, E. Hsieh, S. Pandey, P. Abbeel, J. Svegliato, S. Emmons, O. Watkins et al., \"A strongreject for empty jailbreaks,\" arXiv preprint arXiv:2402.10260, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.273, + 0.923, + 0.358 + ], + "angle": 0, + "content": "[346] L. Jiang, K. Rao, S. Han, A. Ettinger, F. Brahman, S. Kumar, N. Mireshghallah, X. Lu, M. Sap, Y. Choi et al., \"Wildteaming at scale: From in-the-wild jailbreaks to (adversarily) safer language models,\" Advances in Neural Information Processing Systems, vol. 37, pp. 47094-47165, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.36, + 0.923, + 0.403 + ], + "angle": 0, + "content": "[347] D. Hendrycks, M. Mazeika, and T. Woodside, \"An overview of catastrophic ai risks,\" arXiv preprint arXiv:2306.12001, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.404, + 0.923, + 0.476 + ], + "angle": 0, + "content": "[348] B. Baker, J. Huizinga, L. Gao, Z. Dou, M. Y. Guan, A. Madry, W. Zaremba, J. Pachocki, and D. Farhi, \"Monitoring reasoning models for misbehavior and the risks of promoting obfuscation,\" arXiv preprint arXiv:2503.11926, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.477, + 0.923, + 0.549 + ], + "angle": 0, + "content": "[349] T. Hagendorff, \"Deception abilities emerged in large language models,\" Proceedings of the National Academy of Sciences, vol. 121, no. 24, p. e2317967121, 2024. [Online]. Available: https://www.pnas.org/doi/abs/10.1073/pnas.2317967121" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.55, + 0.923, + 0.606 + ], + "angle": 0, + "content": "[350] P. S. Park, S. Goldstein, A. O'Gara, M. Chen, and D. Hendrycks, \"Ai deception: A survey of examples, risks, and potential solutions,\" Patterns, vol. 5, no. 5, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.608, + 0.923, + 0.637 + ], + "angle": 0, + "content": "[351] OpenAI, \"Gpt-4 technical report,\" ArXiv, vol. abs/2303.08774, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.638, + 0.923, + 0.695 + ], + "angle": 0, + "content": "[352] F. Ward, F. Toni, F. Belardinelli, and T. Everitt, \"Honesty is the best policy: defining and mitigating ai deception,\" Advances in neural information processing systems, vol. 36, pp. 2313-2341, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.695, + 0.923, + 0.753 + ], + "angle": 0, + "content": "[353] J. Scheurer, M. Balesni, and M. Hobbahn, \"Large language models can strategically deceive their users when put under pressure,\" arXiv preprint arXiv:2311.07590, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.754, + 0.923, + 0.811 + ], + "angle": 0, + "content": "[354] S. Chern, Z. Hu, Y. Yang, E. Chern, Y. Guo, J. Jin, B. Wang, and P. Liu, \"Behonest: Benchmarking honesty in large language models,\" arXiv preprint arXiv:2406.13261, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.812, + 0.923, + 0.855 + ], + "angle": 0, + "content": "[355] A. O'Gara, \"Hoodwinked: Deception and cooperation in a text-based game for language models,\" arXiv preprint arXiv:2308.01404, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.856, + 0.923, + 0.942 + ], + "angle": 0, + "content": "[356] M. F. A. R. D. T. (FAIR)†, A. Bakhtin, N. Brown, E. Dinan, G. Farina, C. Flaherty, D. Fried, A. Goff, J. Gray, H. Hu et al., \"Human-level play in the game of diplomacy by combining language models with strategic reasoning,\" Science, vol. 378, no. 6624, pp. 1067-1074, 2022." + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.942 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 
14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "49" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.112 + ], + "angle": 0, + "content": "[357] L. Schulz, N. Alon, J. Rosenschein, and P. Dayan, \"Emergent deception and skepticism via theory of mind,\" in First Workshop on Theory of Mind in Communicating Agents, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.113, + 0.492, + 0.168 + ], + "angle": 0, + "content": "[358] A. Meinke, B. Schoen, J. Scheurer, M. Balesni, R. Shah, and M. Hobbahn, \"Frontier models are capable of in-context scheming,\" arXiv preprint arXiv:2412.04984, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.17, + 0.492, + 0.228 + ], + "angle": 0, + "content": "[359] R. Greenblatt, C. Denison, B. Wright, F. Roger, M. Mac-Diarmid, S. Marks, J. Treutlein, T. Belonax, J. Chen, D. Duvenaud et al., \"Alignment faking in large language models,\" arXiv preprint arXiv:2412.14093, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.229, + 0.492, + 0.316 + ], + "angle": 0, + "content": "[360] A. Pan, J. S. Chan, A. Zou, N. Li, S. Basart, T. Woodside, H. Zhang, S. Emmons, and D. Hendrycks, \"Do the rewards justify the means? measuring trade-offs between rewards and ethical behavior in the machiavelli benchmark,\" in International conference on machine learning. PMLR, 2023, pp. 26837-26867." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.317, + 0.492, + 0.374 + ], + "angle": 0, + "content": "[361] L. Vaugrante, F. Carlon, M. Menke, and T. Hagen-dorff, \"Compromising honesty and harmlessness in language models via deception attacks,\" arXiv preprint arXiv:2502.08301, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.375, + 0.492, + 0.417 + ], + "angle": 0, + "content": "[362] J. Ji, K. Wang, T. Qiu, B. Chen, J. Zhou, C. Li, H. Lou, and Y. 
Yang, \"Language models resist alignment,\" arXiv preprint arXiv:2406.06144, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.418, + 0.492, + 0.475 + ], + "angle": 0, + "content": "[363] L. Bürger, F. A. Hamprecht, and B. Nadler, \"Truth is universal: Robust detection of lies in llms,\" Advances in Neural Information Processing Systems, vol. 37, pp. 138-393-138-431, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.477, + 0.492, + 0.533 + ], + "angle": 0, + "content": "[364] OpenAI, \"Detecting misbehavior in frontier reasoning models,\" https://openai.com/index/chain-of-thought-monitoring/, Mar. 2025, accessed: 2025-05-14." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.535, + 0.492, + 0.579 + ], + "angle": 0, + "content": "[365] T. Everitt, V. Krakovna, L. Orseau, M. Hutter, and S. Legg, \"Reinforcement learning with a corrupted reward channel,\" arXiv preprint arXiv:1705.08417, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.579, + 0.492, + 0.622 + ], + "angle": 0, + "content": "[366] S. Zhuang and D. Hadfield-Menell, \"Consequences of misaligned ai,\" Advances in Neural Information Processing Systems, vol. 33, pp. 15763-15773, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.622, + 0.492, + 0.709 + ], + "angle": 0, + "content": "[367] V. Krakovna, J. Uesato, V. Mikulik, M. Rahtz, T. Everitt, R. Kumar, Z. Kenton, J. Leike, and S. Legg, \"Specification gaming: the flip side of ai ingenuity,\" 2020, accessed: 2025-03-30. [Online]. Available: https://deepmind.google/discover/blog/ specification-gaming-the-flip-side-of-ai-ingenuity/" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.71, + 0.492, + 0.753 + ], + "angle": 0, + "content": "[368] D. Amodei, C. Olah, J. Steinhardt, P. Christiano, J. Schulman, and D. Mané, \"Concrete problems in air safety,\" arXiv preprint arXiv:1606.06565, 2016." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.754, + 0.492, + 0.811 + ], + "angle": 0, + "content": "[369] L. Weng, \"Reward hacking in reinforcement learning,\" 2024, accessed: 2025-03-30. [Online]. Available: https://lilianweng.github.io/posts/2024-11-28-reward-hacking" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.812, + 0.492, + 0.883 + ], + "angle": 0, + "content": "[370] T. Everitt, M. Hutter, R. Kumar, and V. Krakovna, \"Reward tampering problems and solutions in reinforcement learning: A causal influence diagram perspective,\" Synthese, vol. 198, no. Suppl 27, pp. 6435-6467, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.884, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[371] J. Skalse, N. Howe, D. Krasheninnikov, and D. Krueger, \"Defining and characterizing reward gaming,\" Advances in Neural Information Processing Systems, vol. 35, pp. 9460-9471, 2022." + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.127 + ], + "angle": 0, + "content": "[372] S. Casper, X. Davies, C. Shi, T. K. Gilbert, J. Scheurer, J. Rando, R. Freedman, T. Korbak, D. Lindner, P. Freire et al., \"Open problems and fundamental limitations of reinforcement learning from human feedback,\" arXiv preprint arXiv:2307.15217, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.128, + 0.923, + 0.183 + ], + "angle": 0, + "content": "[373] L. Gao, J. Schulman, and J. Hilton, \"Scaling laws for reward model overoptimization,\" in International Conference on Machine Learning. PMLR, 2023, pp. 10835-10866." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.185, + 0.923, + 0.271 + ], + "angle": 0, + "content": "[374] E. Perez, S. Ringer, K. Lukosiute, K. Nguyen, E. Chen, S. Heiner, C. Pettit, C. Olsson, S. Kundu, S. 
Kadavath et al., \"Discovering language model behaviors with model-written evaluations,\" in Findings of the Association for Computational Linguistics: ACL 2023, 2023, pp. 13387-13434." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.272, + 0.923, + 0.346 + ], + "angle": 0, + "content": "[375] C. Denison, M. MacDiarmid, F. Berez, D. Duvenaud, S. Kravec, S. Marks, N. Schiefer, R. Soklaski, A. Tamkin, J. Kaplan et al., \"Sycophancy to subterfuge: Investigating reward-tampering in large language models,\" arXiv preprint arXiv:2406.10162, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.346, + 0.923, + 0.389 + ], + "angle": 0, + "content": "[376] P. Singhal, T. Goyal, J. Xu, and G. Durrett, \"A long way to go: Investigating length correlations in rlhf,\" arXiv preprint arXiv:2310.03716, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.389, + 0.923, + 0.461 + ], + "angle": 0, + "content": "[377] F. Bianchi, M. Suzgun, G. Attanasio, P. Röttger, D. Jurafsky, T. Hashimoto, and J. Zou, \"Safety-tuned llamas: Lessons from improving the safety of large language models that follow instructions,\" arXiv preprint arXiv:2309.07875, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.461, + 0.923, + 0.504 + ], + "angle": 0, + "content": "[378] M. Tegmark and S. Omohundro, \"Provably safe systems: the only path to controllable agi,\" arXiv preprint arXiv:2309.01933, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.505, + 0.923, + 0.579 + ], + "angle": 0, + "content": "[379] D. Dalrymple, J. Skalse, Y. Bengio, S. Russell, M. Tegmark, S. Seshia, S. Omohundro, C. Szegedy, B. Goldhaber, N. Ammann et al., \"Towards guaranteed safe ai: A framework for ensuring robust and reliable ai systems,\" arXiv preprint arXiv:2405.06624, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.579, + 0.923, + 0.637 + ], + "angle": 0, + "content": "[380] A. Caliskan, J. J. Bryson, and A. 
Narayanan, \"Semantics derived automatically from language corpora contain human-like biases,\" Science, vol. 356, no. 6334, pp. 183-186, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.637, + 0.923, + 0.709 + ], + "angle": 0, + "content": "[381] R. Xu, Z. Zhou, T. Zhang, Z. Qi, S. Yao, K. Xu, W. Xu, and H. Qiu, \"Walking in others' shoes: How perspective-taking guides large language models in reducing toxicity and bias,\" arXiv preprint arXiv:2407.15366, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.71, + 0.923, + 0.768 + ], + "angle": 0, + "content": "[382] D. Acemoglu and P. Restrepo, \"Artificial intelligence, automation, and work,\" in The economics of artificial intelligence: An agenda. University of Chicago Press, 2018, pp. 197-236." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.768, + 0.923, + 0.825 + ], + "angle": 0, + "content": "[383] J. Mokander, J. Schuett, H. R. Kirk, and L. Floridi, \"Auditing large language models: a three-layered approach,\" AI and Ethics, vol. 4, no. 4, pp. 1085-1115, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.826, + 0.923, + 0.899 + ], + "angle": 0, + "content": "[384] M. Anderljung, J. Barnhart, A. Korinek, J. Leung, C. O'Keefe, J. Whittlestone, S. Avin, M. Brundage, J. Bullock, D. Cass-Beggs et al., \"Frontier ai regulation: Managing emerging risks to public safety,\" arXiv preprint arXiv:2307.03718, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.899, + 0.923, + 0.929 + ], + "angle": 0, + "content": "[385] A. Mannes, \"Governance, risk, and artificial intelligence,\" *Ai Magazine*, vol. 41, no. 1, pp. 61-69, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.929, + 0.923, + 0.944 + ], + "angle": 0, + "content": "[386] L. Koessler and J. 
Schuett, \"Risk assessment at agi" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "50" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.492, + 0.098 + ], + "angle": 0, + "content": "companies: A review of popular risk assessment techniques from other safety-critical industries,\" arXiv preprint arXiv:2307.08823, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.098, + 0.492, + 0.156 + ], + "angle": 0, + "content": "[387] J. Schuett, N. Dreksler, M. Anderljung, D. McCaffary, L. Heim, E. Bluemke, and B. Garfinkel, \"Towards best practices in agi safety and governance: A survey of expert opinion,\" arXiv preprint arXiv:2305.07153, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.157, + 0.492, + 0.213 + ], + "angle": 0, + "content": "[388] L. Ho, J. Barnhart, R. Trager, Y. Bengio, M. Brundage, A. Carnegie, R. Chowdhury, A. Dafoe, G. Hadfield, M. Levi et al., \"International institutions for advanced ai,\" arXiv preprint arXiv:2307.04699, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.215, + 0.492, + 0.257 + ], + "angle": 0, + "content": "[389] M. M. Maas, \"Aligning ai regulation to sociotechnical change,\" in The Oxford Handbook of AI Governance, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.259, + 0.492, + 0.33 + ], + "angle": 0, + "content": "[390] M. Kinniment, L. J. K. Sato, H. Du, B. Goodrich, M. Hasin, L. Chan, L. H. Miles, T. R. Lin, H. Wijk, J. Burget et al., \"Evaluating language-model agents on realistic autonomous tasks,\" arXiv preprint arXiv:2312.11671, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.332, + 0.492, + 0.403 + ], + "angle": 0, + "content": "[391] J. Tallberg, E. Erman, M. Furendal, J. Geith, M. Klamberg, and M. Lundgren, \"The global governance of artificial intelligence: Next steps for empirical and normative research,\" International Studies Review, vol. 25, no. 3, p. viad040, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.405, + 0.492, + 0.433 + ], + "angle": 0, + "content": "[392] OECD, \"OECD Principles on Artificial Intelligence,\" https://oecd.ai/en/ai-principles, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.434, + 0.492, + 0.476 + ], + "angle": 0, + "content": "[393] UNESCO, \"Recommendation on the Ethics of Artificial Intelligence,\" https://unesdoc.unesco.org/ark:/48223/pf0000381137, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.477, + 0.492, + 0.577 + ], + "angle": 0, + "content": "[394] E. Seger, N. Dreksler, R. Moulange, E. Dardaman, J. Schuett, K. Wei, C. Winter, M. Arnold, S. O. hEigeartaigh, A. Korinek et al., \"Open-sourcing highly capable foundation models: An evaluation of risks, benefits, and alternative methods for pursuing open-source objectives,\" arXiv preprint arXiv:2311.09227, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.579, + 0.492, + 0.636 + ], + "angle": 0, + "content": "[395] F. Urbina, F. Lentzos, C. Invernizzi, and S. Ekins, \"Dual use of artificial-intelligence-powered drug discovery,\" Nature machine intelligence, vol. 4, no. 3, pp. 189-191, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.638, + 0.492, + 0.68 + ], + "angle": 0, + "content": "[396] Meta, \"Meta and Microsoft introduce the next generation of Llama,\" https://ai.meta.com/blog/llama-2, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.681, + 0.492, + 0.751 + ], + "angle": 0, + "content": "[397] E. 
Mostaque, \"Democratizing ai, stable diffusion & generative models,\" https://exchange scale.com/public/videos/emad-mostaque-stability-ai-stable-diffusion-open-sou2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.754, + 0.492, + 0.825 + ], + "angle": 0, + "content": "[398] J. A. Goldstein, G. Sastry, M. Musser, R. DiResta, M. Gentzel, and K. Sedova, \"Generative language models and automated influence operations: Emerging threats and potential mitigations,\" arXiv preprint arXiv:2301.04246, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.827, + 0.492, + 0.898 + ], + "angle": 0, + "content": "[399] I. Solaiman, M. Brundage, J. Clark, A. Askell, A. Herbert-Voss, J. Wu, A. Radford, G. Krueger, J. W. Kim, S. Kreps et al., \"Release strategies and the social impacts of language models,\" arXiv preprint arXiv:1908.09203, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.9, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[400] P. Chavez, \"An ai challenge: Balancing open and closed systems,\" https://cepa.org/article/an-ai-challenge-balancing-open-and-closed-systems," + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.055, + 0.586, + 0.067 + ], + "angle": 0, + "content": "2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.068, + 0.923, + 0.127 + ], + "angle": 0, + "content": "[401] N. Zhang, Y. Yao, B. Tian, P. Wang, S. Deng, M. Wang, Z. Xi, S. Mao, J. Zhang, Y. Ni et al., \"A comprehensive study of knowledge editing for large language models,\" arXiv preprint arXiv:2401.01286, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.128, + 0.923, + 0.184 + ], + "angle": 0, + "content": "[402] J. Fang, H. Jiang, K. Wang, Y. Ma, X. Wang, X. He, and T.-s. Chua, \"Alphaedit: Null-space constrained knowledge editing for language models,\" arXiv preprint arXiv:2410.02355, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.186, + 0.923, + 0.257 + ], + "angle": 0, + "content": "[403] Z. Zhang, Y. Zhou, X. Zhao, T. Che, and L. Lyu, \"Prompt certified machine unlearning with randomized gradient smoothing and quantization,\" Advances in Neural Information Processing Systems, vol. 35, pp. 13433-13455, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.259, + 0.923, + 0.33 + ], + "angle": 0, + "content": "[404] T. Che, Y. Zhou, Z. Zhang, L. Lyu, J. Liu, D. Yan, D. Dou, and J. Huan, \"Fast federated machine unlearning with nonlinear functional theory,\" in International conference on machine learning. PMLR, 2023, pp. 4241-4268." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.332, + 0.923, + 0.374 + ], + "angle": 0, + "content": "[405] W. Wang, Z. Tian, C. Zhang, and S. Yu, \"Machine unlearning: A comprehensive survey,\" arXiv preprint arXiv:2405.07406, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.375, + 0.923, + 0.433 + ], + "angle": 0, + "content": "[406] S. Liu, Y. Yao, J. Jia, S. Casper, N. Baracaldo, P. Hase, Y. Yao, C. Y. Liu, X. Xu, H. Li et al., \"Rethinking machine unlearning for large language models,\" Nature Machine Intelligence, pp. 1-14, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.435, + 0.923, + 0.476 + ], + "angle": 0, + "content": "[407] Y. Yao, X. Xu, and Y. Liu, \"Large language model unlearning,\" Advances in Neural Information Processing Systems, vol. 37, pp. 105-425-105-475, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.477, + 0.923, + 0.533 + ], + "angle": 0, + "content": "[408] C. Ding, J. Wu, Y. Yuan, J. Lu, K. Zhang, A. Su, X. Wang, and X. He, \"Unified parameter-efficient unlearning for llms,\" arXiv preprint arXiv:2412.00383, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.535, + 0.923, + 0.591 + ], + "angle": 0, + "content": "[409] Z. Li, H. Jiang, H. Chen, B. Bi, Z. Zhou, F. Sun, J. Fang, and X. 
Wang, \"Reinforced lifelong editing for language models,\" arXiv preprint arXiv:2502.05759, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.594, + 0.923, + 0.636 + ], + "angle": 0, + "content": "[410] E. Mitchell, C. Lin, A. Bosselut, C. Finn, and C. D. Manning, \"Fast model editing at scale,\" arXiv preprint arXiv:2110.11309, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.638, + 0.923, + 0.68 + ], + "angle": 0, + "content": "[411] N. De Cao, W. Aziz, and I. Titov, \"Editing factual knowledge in language models,\" arXiv preprint arXiv:2104.08164, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.681, + 0.923, + 0.739 + ], + "angle": 0, + "content": "[412] P. Wang, Z. Li, N. Zhang, Z. Xu, Y. Yao, Y. Jiang, P. Xie, F. Huang, and H. Chen, \"Wise: Rethinking the knowledge memory for lifelong model editing of large language models,\" arXiv preprint arXiv:2405.14768, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.74, + 0.923, + 0.81 + ], + "angle": 0, + "content": "[413] T. Hartvigsen, S. Sankaranarayanan, H. Palangi, Y. Kim, and M. Ghassemi, \"Aging with grace: Lifelong model editing with discrete key-value adaptors,\" Advances in Neural Information Processing Systems, vol. 36, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.812, + 0.923, + 0.869 + ], + "angle": 0, + "content": "[414] H. Jiang, J. Fang, N. Zhang, G. Ma, M. Wan, X. Wang, X. He, and T.-s. Chua, \"Anyedit: Edit any knowledge encoded in language models,\" arXiv preprint arXiv:2502.05628, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.87, + 0.923, + 0.928 + ], + "angle": 0, + "content": "[415] H. Jiang, J. Fang, T. Zhang, A. Zhang, R. Wang, T. Liang, and X. Wang, \"Neuron-level sequential editing for large language models,\" arXiv preprint arXiv:2410.04045, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.929, + 0.923, + 0.944 + ], + "angle": 0, + "content": "[416] K. Meng, D. Bau, A. Andonian, and Y. 
Belinkov," + }, + { + "type": "list", + "bbox": [ + 0.509, + 0.055, + 0.923, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.921, + 0.043 + ], + "angle": 0, + "content": "51" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.491, + 0.098 + ], + "angle": 0, + "content": "\"Locating and editing factual associations in gpt,\" Advances in Neural Information Processing Systems, vol. 35, pp. 17359-17372, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.098, + 0.492, + 0.155 + ], + "angle": 0, + "content": "[417] A. Prasad, P. Hase, X. Zhou, and M. Bansal, \"Grips: Gradient-free, edit-based instruction search for prompting large language models,\" arXiv preprint arXiv:2203.07281, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.157, + 0.492, + 0.198 + ], + "angle": 0, + "content": "[418] G. Gangadhar and K. Stratos, \"Model editing by standard fine-tuning,\" arXiv preprint arXiv:2402.11078, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.2, + 0.492, + 0.257 + ], + "angle": 0, + "content": "[419] E. Mitchell, C. Lin, A. Bosselut, C. D. Manning, and C. Finn, \"Memory-based model editing at scale,\" in International Conference on Machine Learning. PMLR, 2022, pp. 15817-15831." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.259, + 0.492, + 0.316 + ], + "angle": 0, + "content": "[420] Y. Yao, P. Wang, B. Tian, S. Cheng, Z. Li, S. Deng, H. Chen, and N. Zhang, \"Editing large language models: Problems, methods, and opportunities,\" arXiv preprint arXiv:2305.13172, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.317, + 0.492, + 0.36 + ], + "angle": 0, + "content": "[421] K. Meng, A. S. Sharma, A. Andonian, Y. Belinkov, and D. 
Bau, \"Mass-editing memory in a transformer,\" arXiv preprint arXiv:2210.07229, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.361, + 0.492, + 0.417 + ], + "angle": 0, + "content": "[422] J.-C. Gu, H.-X. Xu, J.-Y. Ma, P. Lu, Z.-H. Ling, K.-W. Chang, and N. Peng, \"Model editing can hurt general abilities of large language models,\" arXiv e-prints, pp. arXiv-2401, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.419, + 0.492, + 0.476 + ], + "angle": 0, + "content": "[423] X. Li, S. Li, S. Song, J. Yang, J. Ma, and J. Yu, \"Pmet: Precise model editing in a transformer,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 17, 2024, pp. 18564-18572." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.477, + 0.492, + 0.52 + ], + "angle": 0, + "content": "[424] M. Zhang, X. Ye, Q. Liu, P. Ren, S. Wu, and Z. Chen, \"Knowledge graph enhanced large language model editing,\" arXiv preprint arXiv:2402.13593, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.521, + 0.492, + 0.564 + ], + "angle": 0, + "content": "[425] C. Chen, B. Huang, Z. Li, Z. Chen, S. Lai, X. Xu, J.-C. Gu, J. Gu, H. Yao, C. Xiao et al., \"Can editing llms inject harm?\" arXiv preprint arXiv:2407.20224, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.565, + 0.492, + 0.621 + ], + "angle": 0, + "content": "[426] M. Wang, N. Zhang, Z. Xu, Z. Xi, S. Deng, Y. Yao, Q. Zhang, L. Yang, J. Wang, and H. Chen, \"Detoxifying large language models via knowledge editing,\" arXiv preprint arXiv:2403.14472, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.622, + 0.492, + 0.679 + ], + "angle": 0, + "content": "[427] C. Zheng, L. Li, Q. Dong, Y. Fan, Z. Wu, J. Xu, and B. Chang, \"Can we edit factual knowledge by in-context learning?\" arXiv preprint arXiv:2305.12740, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.681, + 0.492, + 0.737 + ], + "angle": 0, + "content": "[428] Y. Li, T. Li, K. Chen, J. 
Zhang, S. Liu, W. Wang, T. Zhang, and Y. Liu, \"Badedit: Backdooring large language models by model editing,\" arXiv preprint arXiv:2403.13355, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.739, + 0.492, + 0.796 + ], + "angle": 0, + "content": "[429] K. Grimes, M. Christiani, D. Shriver, and M. Connor, \"Concept-rot: Poisoning concepts in large language models with model editing,\" arXiv preprint arXiv:2412.13341, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.798, + 0.492, + 0.854 + ], + "angle": 0, + "content": "[430] X. Wu, J. Li, M. Xu, W. Dong, S. Wu, C. Bian, and D. Xiong, \"Depn: Detecting and editing privacy neurons in pretrained language models,\" arXiv preprint arXiv:2310.20138, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.855, + 0.492, + 0.899 + ], + "angle": 0, + "content": "[431] X. Li, Z. Li, Y. Kosuga, Y. Yoshida, and V. Bian, \"Precision knowledge editing: Enhancing safety in large language models,\" arXiv preprint arXiv:2410.03772, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.9, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[432] X. Hu, D. Li, B. Hu, Z. Zheng, Z. Liu, and M. Zhang, \"Separate the wheat from the chaff: Model deficiency unlearning via parameter-efficient module op" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.922, + 0.083 + ], + "angle": 0, + "content": "eration,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 16, 2024, pp. 18252-18260." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.084, + 0.922, + 0.14 + ], + "angle": 0, + "content": "[433] T. Yang, L. Dai, Z. Liu, X. Wang, M. Jiang, Y. Tian, and X. Zhang, \"Cliperase: Efficient unlearning of visual-textual associations in clip,\" arXiv preprint arXiv:2410.23330, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.142, + 0.922, + 0.199 + ], + "angle": 0, + "content": "[434] R. Gandikota, J. Materzynska, J. Fiotto-Kaufman, and D. Bau, \"Erasing concepts from diffusion models,\" 2023 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 2426-2436, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.2, + 0.922, + 0.272 + ], + "angle": 0, + "content": "[435] E. Zhang, K. Wang, X. Xu, Z. Wang, and H. Shi, \"Forget-me-not: Learning to forget in text-to-image diffusion models,\" 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pp. 1755-1764, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.273, + 0.922, + 0.343 + ], + "angle": 0, + "content": "[436] C. Fan, J. Liu, Y. Zhang, D. Wei, E. Wong, and S. Liu, \"Salun: Empowering machine unlearning via gradient-based weight saliency in both image classification and generation,\" ArXiv, vol. abs/2310.12508, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.345, + 0.922, + 0.403 + ], + "angle": 0, + "content": "[437] Z. Huang, X. Cheng, J. Zheng, H. Wang, Z. He, T. Li, and X. Huang, \"Unified gradient-based machine unlearning with remain geometry enhancement,\" ArXiv, vol. abs/2409.19732, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.404, + 0.922, + 0.462 + ], + "angle": 0, + "content": "[438] A. Blanco-Justicia, J. Domingo-Ferrer, N. M. Jebreel, B. Manzanares-Salor, and D. Sánchez, \"Unlearning in large language models: We are not there yet,\" Computer, vol. 58, no. 1, pp. 97-100, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.463, + 0.922, + 0.535 + ], + "angle": 0, + "content": "[439] S. Dai, C. Xu, S. Xu, L. Pang, Z. Dong, and J. Xu, \"Bias and unfairness in information retrieval systems: New challenges in the llm era,\" in Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, 2024, pp. 6437-6447." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.536, + 0.922, + 0.578 + ], + "angle": 0, + "content": "[440] G. Nicolas and A. Caliskan, \"A taxonomy of stereotype content in large language models,\" arXiv preprint arXiv:2408.00162, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.579, + 0.922, + 0.636 + ], + "angle": 0, + "content": "[441] S. Wang, R. Li, X. Chen, Y. Yuan, D. F. Wong, and M. Yang, \"Exploring the impact of personality traits on llm bias and toxicity,\" arXiv preprint arXiv:2502.12566, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.637, + 0.922, + 0.709 + ], + "angle": 0, + "content": "[442] A. Liu, Q. Sheng, and X. Hu, \"Preventing and detecting misinformation generated by large language models,\" in Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval, 2024, pp. 3001-3004." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.71, + 0.922, + 0.767 + ], + "angle": 0, + "content": "[443] Q. Zhang, H. Qiu, D. Wang, H. Qian, Y. Li, T. Zhang, and M. Huang, \"Understanding the dark side of lms' intrinsic self-correction,\" arXiv preprint arXiv:2412.14959, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.768, + 0.922, + 0.868 + ], + "angle": 0, + "content": "[444] R. Xu, B. Lin, S. Yang, T. Zhang, W. Shi, T. Zhang, Z. Fang, W. Xu, and H. Qiu, \"The earth is flat because...: Investigating llms' belief towards misinformation via persuasive conversation,\" in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2024, pp. 16259-16303." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.87, + 0.922, + 0.913 + ], + "angle": 0, + "content": "[445] Z. Liu, G. Dou, Z. Tan, Y. Tian, and M. Jiang, \"Machine unlearning in generative ai: A survey,\" arXiv preprint arXiv:2407.20516, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.914, + 0.922, + 0.943 + ], + "angle": 0, + "content": "[446] Y. Qu, M. Ding, N. Sun, K. Thilakarathna, T. Zhu, and D. Niyato, \"The frontier of data erasure: Machine" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.922, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "52" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.49, + 0.083 + ], + "angle": 0, + "content": "unlearning for large language models,\" arXiv preprint arXiv:2403.15779, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.084, + 0.492, + 0.156 + ], + "angle": 0, + "content": "[447] A. Blanco-Justicia, N. Jebreel, B. Manzanares-Salor, D. Sánchez, J. Domingo-Ferrer, G. Collell, and K. Eeik Tan, \"Digital forgetting in large language models: A survey of unlearning methods,\" Artificial Intelligence Review, vol. 58, no. 3, p. 90, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.157, + 0.492, + 0.213 + ], + "angle": 0, + "content": "[448] N. Li, C. Zhou, Y. Gao, H. Chen, Z. Zhang, B. Kuang, and A. Fu, \"Machine unlearning: Taxonomy, metrics, applications, challenges, and prospects,\" IEEE Transactions on Neural Networks and Learning Systems, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.215, + 0.492, + 0.257 + ], + "angle": 0, + "content": "[449] C. Gao, L. Wang, C. Weng, X. Wang, and Q. Zhu, \"Practical unlearning for large language models,\" arXiv preprint arXiv:2407.10223, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.259, + 0.492, + 0.316 + ], + "angle": 0, + "content": "[450] P. Thaker, S. Hu, N. Kale, Y. Maurya, Z. S. Wu, and V. 
Smith, \"Position: Llm unlearning benchmarks are weak measures of progress,\" arXiv preprint arXiv:2410.02879, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.317, + 0.492, + 0.387 + ], + "angle": 0, + "content": "[451] K. Zhao, M. Kurmanji, G.-O. Bärbulescu, E. Triantafillou, and P. Triantafillou, \"What makes unlearning hard and what to do about it,\" Advances in Neural Information Processing Systems, vol. 37, pp. 12293-12333, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.389, + 0.492, + 0.447 + ], + "angle": 0, + "content": "[452] W. Wang, M. Zhang, X. Ye, Z. Ren, Z. Chen, and P. Ren, \"Uipe: Enhancing llm unlearning by removing knowledge related to forgetting targets,\" arXiv preprint arXiv:2503.04693, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.448, + 0.492, + 0.506 + ], + "angle": 0, + "content": "[453] H. Wang, Y. Jing, H. Sun, Y. Wang, J. Wang, J. Liao, and D. Tao, \"Erasing without remembering: Safeguarding knowledge forgetting in large language models,\" arXiv preprint arXiv:2502.19982, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.507, + 0.492, + 0.564 + ], + "angle": 0, + "content": "[454] T. Tran, R. Liu, and L. Xiong, \"Tokens for learning, tokens for unlearning: Mitigating membership inference attacks in large language models via dual-purpose training,\" arXiv preprint arXiv:2502.19726, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.565, + 0.492, + 0.622 + ], + "angle": 0, + "content": "[455] H. Xu, N. Zhao, L. Yang, S. Zhao, S. Deng, M. Wang, B. Hooi, N. Oo, H. Chen, and N. Zhang, \"Relearn: Unlearning via learning for large language models,\" arXiv preprint arXiv:2502.11190, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.623, + 0.492, + 0.68 + ], + "angle": 0, + "content": "[456] Q. Zhang, H. Qiu, D. Wang, Y. Li, T. Zhang, W. Zhu, H. Weng, L. Yan, and C. 
Zhang, \"Large scale knowledge washing,\" in The Thirteenth International Conference on Learning Representations, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.681, + 0.492, + 0.739 + ], + "angle": 0, + "content": "[457] A. Thudi, H. Jia, I. Shumailov, and N. Papernot, \"On the necessity of auditable algorithmic definitions for machine unlearning,\" in 31st USENIX security symposium (USENIX Security 22), 2022, pp. 4007-4022." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.74, + 0.492, + 0.782 + ], + "angle": 0, + "content": "[458] S. Goel, A. Prabhu, P. Torr, P. Kumaraguru, and A. Sanyal, \"Corrective machine unlearning,\" Transactions on Machine Learning Research." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.784, + 0.492, + 0.855 + ], + "angle": 0, + "content": "[459] A. Thudi, G. Deza, V. Chandrasekaran, and N. Papernot, \"Unrolling sgd: Understanding factors influencing machine unlearning,\" in 2022 IEEE 7th European Symposium on Security and Privacy (EuroS&P). IEEE, 2022, pp. 303-319." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.856, + 0.492, + 0.899 + ], + "angle": 0, + "content": "[460] B. Liu, Q. Liu, and P. Stone, \"Continual learning and private unlearning,\" in Conference on Lifelong Learning Agents. PMLR, 2022, pp. 243-254." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.9, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[461] Q. P. Nguyen, B. K. H. Low, and P. Jaillet, \"Variational bayesian unlearning,\" Advances in Neural Information Processing Systems, vol. 33, pp. 16025-16036, 2020." + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.112 + ], + "angle": 0, + "content": "[462] L. Wang, T. Chen, W. Yuan, X. Zeng, K.-F. Wong, and H. Yin, \"Kga: A general machine unlearning framework based on knowledge gap alignment,\" arXiv preprint arXiv:2305.06535, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.113, + 0.923, + 0.169 + ], + "angle": 0, + "content": "[463] Y. Liu, Y. Zhang, T. Jaakkola, and S. Chang, \"Revisiting who's harry potter: Towards targeted unlearning from a causal intervention perspective,\" arXiv preprint arXiv:2407.16997, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.171, + 0.923, + 0.213 + ], + "angle": 0, + "content": "[464] P. Maini, Z. Feng, A. Schwarzschild, Z. C. Lipton, and J. Z. Kolter, \"Tofu: A task of fictitious unlearning for llms,\" arXiv preprint arXiv:2401.06121, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.215, + 0.923, + 0.271 + ], + "angle": 0, + "content": "[465] R. Zhang, L. Lin, Y. Bai, and S. Mei, \"Negative preference optimization: From catastrophic collapse to effective unlearning,\" arXiv preprint arXiv:2404.05868, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.273, + 0.923, + 0.344 + ], + "angle": 0, + "content": "[466] R. Rafailov, A. Sharma, E. Mitchell, C. D. Manning, S. Ermon, and C. Finn, \"Direct preference optimization: Your language model is secretly a reward model,\" Advances in Neural Information Processing Systems, vol. 36, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.346, + 0.923, + 0.416 + ], + "angle": 0, + "content": "[467] J. Huo, Y. Yan, X. Zheng, Y. Lyu, X. Zou, Z. Wei, and X. Hu, \"Mmunlearner: Reformulating multimodal machine unlearning in the era of multimodal large language models,\" arXiv preprint arXiv:2502.11051, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.418, + 0.923, + 0.476 + ], + "angle": 0, + "content": "[468] J. Li, Q. Wei, C. Zhang, G. Qi, M. Du, Y. Chen, and S. Bi, \"Single image unlearning: Efficient machine unlearning in multimodal large language models,\" arXiv preprint arXiv:2405.12523, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.477, + 0.923, + 0.548 + ], + "angle": 0, + "content": "[469] S. Xing, F. Zhao, Z. Wu, T. 
An, W. Chen, C. Li, J. Zhang, and X. Dai, \"Efuf: Efficient fine-grained unlearning framework for mitigating hallucinations in multimodal large language models,\" ArXiv, vol. abs/2402.09801, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.55, + 0.923, + 0.621 + ], + "angle": 0, + "content": "[470] T. Chakraborty, E. Shayegani, Z. Cai, N. B. Abu-Ghazaleh, M. S. Asif, Y. Dong, A. K. Roy-Chowdhury, and C. Song, \"Cross-modal safety alignment: Is textual unlearning all you need?\" ArXiv, vol. abs/2406.02575, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.622, + 0.923, + 0.695 + ], + "angle": 0, + "content": "[471] J. Chen, Z. Deng, K. Zheng, Y. Yan, S. Liu, P. Wu, P. Jiang, J. Liu, and X. Hu, \"Safeeraser: Enhancing safety in multimodal large language models through multimodal machine unlearning,\" arXiv preprint arXiv:2502.12520, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.696, + 0.923, + 0.753 + ], + "angle": 0, + "content": "[472] G. Ilharco, M. T. Ribeiro, M. Wortsman, S. Gururangan, L. Schmidt, H. Hajishirzi, and A. Farhadi, \"Editing models with task arithmetic,\" arXiv preprint arXiv:2212.04089, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.755, + 0.923, + 0.797 + ], + "angle": 0, + "content": "[473] D. Jung, J. Seo, J. Lee, C. Park, and H. Lim, \"Come: An unlearning-based approach to conflict-free model editing,\" arXiv preprint arXiv:2502.15826, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.798, + 0.923, + 0.855 + ], + "angle": 0, + "content": "[474] B. Zhang, Z. Chen, Z. Zheng, J. Li, and H. Chen, \"Resolving editing-unlearning conflicts: A knowledge codebook framework for large language model updating,\" arXiv preprint arXiv:2502.00158, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.856, + 0.923, + 0.898 + ], + "angle": 0, + "content": "[475] R. Eldan and M. Russinovich, \"Who's harry potter? 
approximate unlearning in llms,\" arXiv preprint arXiv:2310.02238, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.9, + 0.923, + 0.943 + ], + "angle": 0, + "content": "[476] N. Li, A. Pan, A. Gopal, S. Yue, D. Berrios, A. Gatti, J. D. Li, A.-K. Dombrowski, S. Goel, L. Phan et al., \"The wmdp benchmark: Measuring and re" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "53" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.49, + 0.083 + ], + "angle": 0, + "content": "ducing malicious use with unlearning,\" arXiv preprint arXiv:2403.03218, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.084, + 0.492, + 0.127 + ], + "angle": 0, + "content": "[477] M. Pawelczyk, S. Neel, and H. Lakkaraju, \"In-context unlearning: Language models as few shot unlearners,\" arXiv preprint arXiv:2310.07579, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.127, + 0.492, + 0.17 + ], + "angle": 0, + "content": "[478] P. Thaker, Y. Maurya, S. Hu, Z. S. Wu, and V. Smith, \"Guardrail baselines for unlearning in llms,\" arXiv preprint arXiv:2403.03329, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.17, + 0.492, + 0.228 + ], + "angle": 0, + "content": "[479] J. Ren, Z. Dai, X. Tang, H. Liu, J. Zeng, Z. Li, R. Goutam, S. Wang, Y. Xing, and Q. He, \"A general framework to enhance fine-tuning-based llm unlearning,\" arXiv preprint arXiv:2502.17823, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.228, + 0.492, + 0.287 + ], + "angle": 0, + "content": "[480] X. Zhao, W. Cai, T. Shi, D. Huang, L. Lin, S. Mei, and D. 
Song, \"Improving llm safety alignment with dual-objective optimization,\" arXiv preprint arXiv:2503.03710, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.287, + 0.492, + 0.359 + ], + "angle": 0, + "content": "[481] S. Takashiro, T. Kojima, A. Gambardella, Q. Cao, Y. Iwasawa, and Y. Matsuo, \"Answer when needed, forget when not: Language models pretend to forget via in-context knowledge unlearning,\" arXiv preprint arXiv:2410.00382, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.36, + 0.492, + 0.403 + ], + "angle": 0, + "content": "[482] A. Muresanu, A. Thudi, M. R. Zhang, and N. Papernot, \"Unlearnable algorithms for in-context learning,\" arXiv preprint arXiv:2402.00751, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.403, + 0.492, + 0.447 + ], + "angle": 0, + "content": "[483] Y. Zhou, X. Li, Q. Wang, and J. Shen, \"Visual in-context learning for large vision-language models,\" arXiv preprint arXiv:2402.11574, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.447, + 0.492, + 0.505 + ], + "angle": 0, + "content": "[484] Z. Liu, G. Dou, X. Yuan, C. Zhang, Z. Tan, and M. Jiang, \"Modality-aware neuron pruning for unlearning in multimodal large language models,\" arXiv preprint arXiv:2502.15910, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.506, + 0.492, + 0.564 + ], + "angle": 0, + "content": "[485] N. Yang, M. Kim, S. Yoon, J. Shin, and K. Jung, \"Faithun: Toward faithful forgetting in language models by investigating the interconnectedness of knowledge,\" arXiv preprint arXiv:2502.19207, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.565, + 0.492, + 0.622 + ], + "angle": 0, + "content": "[486] A. Ramakrishna, Y. Wan, X. Jin, K.-W. Chang, Z. Bu, B. Vinzamuri, V. Cevher, M. Hong, and R. Gupta, \"Lume: Llm unlearning with multitask evaluations,\" arXiv preprint arXiv:2502.15097, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.622, + 0.492, + 0.695 + ], + "angle": 0, + "content": "[487] Y. Lang, K. Guo, Y. Huang, Y. Zhou, H. Zhuang, T. Yang, Y. Su, and X. Zhang, \"Beyond single-value metrics: Evaluating and enhancing llm unlearning with cognitive diagnosis,\" arXiv preprint arXiv:2502.13996, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.695, + 0.492, + 0.753 + ], + "angle": 0, + "content": "[488] Q. Wang, J. P. Zhou, Z. Zhou, S. Shin, B. Han, and K. Q. Weinberger, \"Rethinking llm unlearning objectives: A gradient perspective and go beyond,\" arXiv preprint arXiv:2502.19301, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.754, + 0.492, + 0.811 + ], + "angle": 0, + "content": "[489] M. Khoriaty, A. Shportko, G. Mercier, and Z. Wood-Doughty, \"Don't forget it! conditional sparse autoencoder clamping works for unlearning,\" arXiv preprint arXiv:2503.11127, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.812, + 0.492, + 0.855 + ], + "angle": 0, + "content": "[490] J. Cheng and H. Amiri, \"Mu-bench: A multitask multimodal benchmark for machine unlearning,\" arXiv preprint arXiv:2406.14796, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.855, + 0.492, + 0.914 + ], + "angle": 0, + "content": "[491] V. Patil, Y.-L. Sung, P. Hase, J. Peng, T. Chen, and M. Bansal, \"Unlearning sensitive information in multimodal llms: Benchmark and attack-defense evaluation,\" Transactions on Machine Learning Research." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.914, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[492] Y. Ma, J. Wang, F. Wang, S. Ma, J. Li, X. Li, F. Huang, L. Sun, B. Li, Y. 
Choi et al., \"Benchmarking vision lan" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.922, + 0.083 + ], + "angle": 0, + "content": "guage model unlearning via fictitious facial identity dataset,\" arXiv preprint arXiv:2411.03554, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.083, + 0.922, + 0.141 + ], + "angle": 0, + "content": "[493] S. Moon, M. Lee, S. Park, and D. Kim, “Holistic unlearning benchmark: A multi-faceted evaluation for text-to-image diffusion model unlearning,” arXiv preprint arXiv:2410.05664, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.141, + 0.922, + 0.17 + ], + "angle": 0, + "content": "[494] D. Sanyal and M. Mandal, \"Alu: Agentic llm unlearning,\" arXiv preprint arXiv:2502.00406, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.17, + 0.922, + 0.213 + ], + "angle": 0, + "content": "[495] J. Cheng and H. Amiri, \"Tool unlearning for tool-augmented llms,\" arXiv preprint arXiv:2502.01083, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.213, + 0.922, + 0.272 + ], + "angle": 0, + "content": "[496] H. Liu, P. Xiong, T. Zhu, and S. Y. Philip, \"A survey on machine unlearning: Techniques and new emerged privacy risks,\" Journal of Information Security and Applications, vol. 90, p. 104010, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.272, + 0.922, + 0.33 + ], + "angle": 0, + "content": "[497] S. Qureshi, T. Shaik, X. Tao, H. Xie, L. Li, J. Yong, and X. Jia, \"Exploring incremental unlearning: Techniques, challenges, and future directions,\" arXiv preprint arXiv:2502.16708, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.33, + 0.922, + 0.403 + ], + "angle": 0, + "content": "[498] J. Geng, Q. Li, H. Woisetschlaeger, Z. Chen, Y. Wang, P. Nakov, H.-A. Jacobsen, and F. 
Karray, \"A comprehensive survey of machine unlearning techniques for large language models,\" arXiv preprint arXiv:2503.01854, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.403, + 0.922, + 0.49 + ], + "angle": 0, + "content": "[499] X. He, C. Chen, L. Lyu, and Q. Xu, \"Extracted bert model leaks more information than you think!\" in Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, EMNLP 2022. Association for Computational Linguistics, 2022, pp. 1530-1537." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.491, + 0.922, + 0.563 + ], + "angle": 0, + "content": "[500] X. He, Q. Xu, L. Lyu, F. Wu, and C. Wang, \"Protecting intellectual property of language generation apis with lexical watermark,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 36, no. 10, 2022, pp. 10758-10766." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.564, + 0.922, + 0.636 + ], + "angle": 0, + "content": "[501] X. He, Q. Xu, Y. Zeng, L. Lyu, F. Wu, J. Li, and R. Jia, \"Cater: Intellectual property protection on text generation apis via conditional watermarks,\" Advances in Neural Information Processing Systems, vol. 35, pp. 5431-5445, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.636, + 0.922, + 0.738 + ], + "angle": 0, + "content": "[502] W. Peng, J. Yi, F. Wu, S. Wu, B. B. Zhu, L. Lyu, B. Jiao, T. Xu, G. Sun, and X. Xie, \"Are you copying my model? protecting the copyright of large language models for eaas via backdoor watermark,\" in Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2023, pp. 7653-7668." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.739, + 0.922, + 0.798 + ], + "angle": 0, + "content": "[503] N. Carlini, D. Paleka, K. D. Dvijotham, T. Steinke, J. Hayase, A. F. Cooper, K. Lee, M. Jagielski, M. Nasr, A. 
Conmy et al., \"Stealing part of a production language model,\" arXiv preprint arXiv:2403.06634, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.798, + 0.922, + 0.841 + ], + "angle": 0, + "content": "[504] M. Finlayson, X. Ren, and S. Swayamdipta, \"Logits of api-protected llms leak proprietary information,\" arXiv preprint arXiv:2403.09539, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.841, + 0.922, + 0.899 + ], + "angle": 0, + "content": "[505] S. Zanella-Beguelin, S. Tople, A. Paverd, and B. Köpf, \"Grey-box extraction of natural language models,\" in International Conference on Machine Learning. PMLR, 2021, pp. 12278-12286." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.899, + 0.922, + 0.943 + ], + "angle": 0, + "content": "[506] E. Horwitz, J. Kahana, and Y. Hoshen, \"Recovering the pre-fine-tuning weights of generative models,\" arXiv preprint arXiv:2402.10208, 2024." + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.922, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "54" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.127 + ], + "angle": 0, + "content": "[507] Z. Li, C. Wang, P. Ma, C. Liu, S. Wang, D. Wu, C. Gao, and Y. Liu, \"On extracting specialized code abilities from large language models: A feasibility study,\" in Proceedings of the IEEE/ACM 46th International Conference on Software Engineering, 2024, pp. 1-13." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.127, + 0.492, + 0.17 + ], + "angle": 0, + "content": "[508] A. Liu and A. Moitra, \"Model stealing for any low-rank language model,\" arXiv preprint arXiv:2411.07536, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.17, + 0.492, + 0.227 + ], + "angle": 0, + "content": "[509] W. Shi, A. Ajith, M. Xia, Y. Huang, D. Liu, T. Blevins, D. Chen, and L. Zettlemoyer, \"Detecting pretraining data from large language models,\" arXiv preprint arXiv:2310.16789, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.228, + 0.492, + 0.287 + ], + "angle": 0, + "content": "[510] J. Zhang, J. Sun, E. Yeats, Y. Ouyang, M. Kuo, J. Zhang, H. F. Yang, and H. Li, \"Min-\\(k\\%\\)++: Improved baseline for detecting pre-training data from large language models,\" arXiv preprint arXiv:2404.02936, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.287, + 0.492, + 0.33 + ], + "angle": 0, + "content": "[511] D. Das, J. Zhang, and F. Tramér, \"Blind baselines beat membership inference attacks for foundation models,\" arXiv preprint arXiv:2406.16201, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.33, + 0.492, + 0.389 + ], + "angle": 0, + "content": "[512] P. Maini, H. Jia, N. Papernot, and A. Dziedzic, \"Llm dataset inference: Did you train on my dataset?\" Advances in Neural Information Processing Systems, vol. 37, pp. 124069-124092, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.389, + 0.492, + 0.446 + ], + "angle": 0, + "content": "[513] A. V. Duarte, X. Zhao, A. L. Oliveira, and L. Li, \"De-cop: Detecting copyrighted content in language models training data,\" arXiv preprint arXiv:2402.09910, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.446, + 0.492, + 0.505 + ], + "angle": 0, + "content": "[514] R. Xie, J. Wang, R. Huang, M. Zhang, R. Ge, J. Pei, N. Z. Gong, and B. Dhingra, \"Recall: Membership inference via relative conditional log-likelihoods,\" arXiv preprint arXiv:2406.15968, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.505, + 0.492, + 0.55 + ], + "angle": 0, + "content": "[515] F. Galli, L. Melis, and T. 
Cucinotta, \"Noisy neighbors: Efficient membership inference attacks against llms,\" arXiv preprint arXiv:2406.16565, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.55, + 0.492, + 0.593 + ], + "angle": 0, + "content": "[516] H. Mozaffari and V. J. Marathe, \"Semantic membership inference attack against large language models,\" arXiv preprint arXiv:2406.10218, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.593, + 0.492, + 0.666 + ], + "angle": 0, + "content": "[517] M. Meeus, S. Jain, M. Rei, and Y.-A. de Montjoye, \"Did the neurons read your book? document-level membership inference for large language models,\" in 33rd USENIX Security Symposium (USENIX Security 24), 2024, pp. 2369-2385." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.666, + 0.492, + 0.709 + ], + "angle": 0, + "content": "[518] M. Meeus, I. Shilov, M. Faysse, and Y.-A. De Montjoye, \"Copyright traps for large language models,\" arXiv preprint arXiv:2402.09363, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.709, + 0.492, + 0.767 + ], + "angle": 0, + "content": "[519] H. Puerto, M. Gubri, S. Yun, and S. J. Oh, \"Scaling up membership inference: When and how attacks succeed on large language models,\" arXiv preprint arXiv:2411.00154, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.767, + 0.492, + 0.826 + ], + "angle": 0, + "content": "[520] M. Anderson, G. Amit, and A. Goldsteen, “Is my data in your retrieval database? membership inference attacks against retrieval augmented generation,” arXiv preprint arXiv:2405.20446, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.826, + 0.492, + 0.884 + ], + "angle": 0, + "content": "[521] Y. Li, G. Liu, C. Wang, and Y. Yang, \"Generating is believing: Membership inference attacks against retrieval-augmented generation,\" arXiv preprint arXiv:2406.19234, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.884, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[522] R. 
Wen, Z. Li, M. Backes, and Y. Zhang, \"Membership inference attacks against in-context learning,\" in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 3481-" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.055, + 0.586, + 0.067 + ], + "angle": 0, + "content": "3495." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.068, + 0.922, + 0.111 + ], + "angle": 0, + "content": "[523] H. Duan, A. Dziedzic, M. Yaghini, N. Papernot, and F. Boenisch, \"On the privacy risk of in-context learning,\" arXiv preprint arXiv:2411.10512, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.112, + 0.922, + 0.17 + ], + "angle": 0, + "content": "[524] Y. Wen, L. Marchyok, S. Hong, J. Geiping, T. Goldstein, and N. Carlini, \"Privacy backdoors: Enhancing membership inference through poisoning pre-trained models,\" arXiv preprint arXiv:2404.01231, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.17, + 0.922, + 0.228 + ], + "angle": 0, + "content": "[525] R. Wen, T. Wang, M. Backes, Y. Zhang, and A. Salem, \"Last one standing: A comparative analysis of security and privacy of soft prompt tuning, lora, and in-context learning,\" arXiv preprint arXiv:2310.11397, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.228, + 0.922, + 0.286 + ], + "angle": 0, + "content": "[526] S. Balloccu, P. Schmidtová, M. Lango, and O. Dusek, \"Leak, cheat, repeat: Data contamination and evaluation malpractices in closed-source llms,\" arXiv preprint arXiv:2402.03927, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.287, + 0.922, + 0.36 + ], + "angle": 0, + "content": "[527] W. Fu, H. Wang, C. Gao, G. Liu, Y. Li, and T. 
Jiang, \"Membership inference attacks against fine-tuned large language models via self-prompt calibration,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.36, + 0.922, + 0.418 + ], + "angle": 0, + "content": "[528] H. Li, G. Deng, Y. Liu, K. Wang, Y. Li, T. Zhang, Y. Liu, G. Xu, G. Xu, and H. Wang, \"Digger: Detecting copyright content mis-usage in large language model training,\" arXiv preprint arXiv:2401.00676, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.418, + 0.922, + 0.475 + ], + "angle": 0, + "content": "[529] A. Naseh and N. Mireshghallah, \"Synthetic data can mislead evaluations: Membership inference as machine text detection,\" arXiv preprint arXiv:2501.11786, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.476, + 0.922, + 0.535 + ], + "angle": 0, + "content": "[530] Z. Liao and H. Sun, \"Amplegcg: Learning a universal and transferable generative model of adversarial suffixes for jailbreaking both open and closed llms,\" arXiv preprint arXiv:2404.07921, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.535, + 0.922, + 0.593 + ], + "angle": 0, + "content": "[531] X. Jia, T. Pang, C. Du, Y. Huang, J. Gu, Y. Liu, X. Cao, and M. Lin, \"Improved techniques for optimization-based jailbreaking on large language models,\" arXiv preprint arXiv:2405.21018, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.593, + 0.922, + 0.652 + ], + "angle": 0, + "content": "[532] Y. Zhang and Z. Wei, \"Boosting jailbreak attack with momentum,\" in ICASSP 2025-2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2025, pp. 1-5." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.652, + 0.922, + 0.738 + ], + "angle": 0, + "content": "[533] Y. Zhao, W. Zheng, T. Cai, D. Xuan Long, K. Kawaguchi, A. Goyal, and M. Q. 
Shieh, \"Accelerating greedy coordinate gradient and general prompt optimization via probe sampling,\" Advances in Neural Information Processing Systems, vol. 37, pp. 53710-53731, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.738, + 0.922, + 0.783 + ], + "angle": 0, + "content": "[534] X. Liu, N. Xu, M. Chen, and C. Xiao, \"Autodan: Generating stealthy jailbreak prompts on aligned large language models,\" arXiv preprint arXiv:2310.04451, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.783, + 0.922, + 0.853 + ], + "angle": 0, + "content": "[535] S. Zhu, R. Zhang, B. An, G. Wu, J. Barrow, Z. Wang, F. Huang, A. Nenkova, and T. Sun, \"Autodan: interpretable gradient-based adversarial attacks on large language models,\" arXiv preprint arXiv:2310.15140, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.853, + 0.922, + 0.928 + ], + "angle": 0, + "content": "[536] A. Mehrotra, M. Zampetakis, P. Kassianik, B. Nelson, H. Anderson, Y. Singer, and A. Karbasi, \"Tree of attacks: Jailbreaking black-box llms automatically,\" Advances in Neural Information Processing Systems, vol. 37, pp. 61-65, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.928, + 0.922, + 0.943 + ], + "angle": 0, + "content": "[537] C. Sitawarin, N. Mu, D. Wagner, and A. Araujo," + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.055, + 0.922, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "55" + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.054, + 0.49, + 0.083 + ], + "angle": 0, + "content": "\"Pal: Proxy-guided black-box attack on large language models,\" arXiv preprint arXiv:2402.09674, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.084, + 0.492, + 0.141 + ], + "angle": 0, + "content": "[538] G. Deng, Y. Liu, Y. Li, K. Wang, Y. Zhang, Z. Li, H. Wang, T. Zhang, and Y. Liu, \"Masterkey: Automated jailbreak across multiple large language model chatbots,\" arXiv preprint arXiv:2307.08715, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.142, + 0.492, + 0.2 + ], + "angle": 0, + "content": "[539] X. Liu, P. Li, E. Suh, Y. Vorobeychik, Z. Mao, S. Jha, P. McDaniel, H. Sun, B. Li, and C. Xiao, \"Autodanturbo: A lifelong agent for strategy self-exploration to jailbreak llms,\" arXiv preprint arXiv:2410.05295, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.2, + 0.492, + 0.242 + ], + "angle": 0, + "content": "[540] Y. Liu, X. He, M. Xiong, J. Fu, S. Deng, and B. Hooi, \"Flipattack: Jailbreak llms via flipping,\" arXiv preprint arXiv:2410.02832, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.243, + 0.492, + 0.302 + ], + "angle": 0, + "content": "[541] T. Wu, Z. Xue, Y. Liu, J. Zhang, B. Hooi, and S.-K. Ng, \"Geneshift: Impact of different scenario shift on jailbreaking llm,\" 2025. [Online]. Available: https://arxiv.org/abs/2504.08104" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.302, + 0.492, + 0.345 + ], + "angle": 0, + "content": "[542] F. Perez and I. Ribeiro, \"Ignore previous prompt: Attack techniques for language models,\" arXiv preprint arXiv:2211.09527, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.346, + 0.492, + 0.433 + ], + "angle": 0, + "content": "[543] K. Greshake, S. Abdelnabi, S. Mishra, C. Endres, T. Holz, and M. Fritz, \"Not what you've signed up for: Compromising real-world llm-integrated applications with indirect prompt injection,\" in Proceedings of the 16th ACM Workshop on Artificial Intelligence and Security, 2023, pp. 79-90." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.433, + 0.492, + 0.491 + ], + "angle": 0, + "content": "[544] Y. Liu, G. 
Deng, Y. Li, K. Wang, Z. Wang, X. Wang, T. Zhang, Y. Liu, H. Wang, Y. Zheng et al., \"Prompt injection attack against llm-integrated applications,\" arXiv preprint arXiv:2306.05499, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.491, + 0.492, + 0.564 + ], + "angle": 0, + "content": "[545] S. Toyer, O. Watkins, E. A. Mendes, J. Svegliato, L. Bailey, T. Wang, I. Ong, K. Elmaaroufi, P. Abbeel, T. Darrell et al., \"Tensor trust: Interpretable prompt injection attacks from an online game,\" arXiv preprint arXiv:2311.01011, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.565, + 0.492, + 0.637 + ], + "angle": 0, + "content": "[546] J. Shi, Z. Yuan, Y. Liu, Y. Huang, P. Zhou, L. Sun, and N. Z. Gong, \"Optimization-based prompt injection attack to lmm-as-a-judge,\" in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 660-674." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.637, + 0.492, + 0.695 + ], + "angle": 0, + "content": "[547] X. Liu, Z. Yu, Y. Zhang, N. Zhang, and C. Xiao, \"Automatic and universal prompt injection attacks against large language models,\" arXiv preprint arXiv:2403.04957, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.695, + 0.492, + 0.738 + ], + "angle": 0, + "content": "[548] X. Liu, S. Jha, P. McDaniel, B. Li, and C. Xiao, \"Autohijacker: Automatic indirect prompt injection against black-box llm agents.\"" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.739, + 0.492, + 0.797 + ], + "angle": 0, + "content": "[549] A. Al-Kaswan, M. Izadi, and A. Van Deursen, \"Targeted attack on gpt-neo for the satml language model data extraction challenge,\" arXiv preprint arXiv:2302.07735, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.798, + 0.492, + 0.855 + ], + "angle": 0, + "content": "[550] E. Su, A. Vellore, A. Chang, R. Mura, B. Nelson, P. Kassianik, and A. 
Karbasi, \"Extracting memorized training data via decomposition,\" arXiv preprint arXiv:2409.12367, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.856, + 0.492, + 0.899 + ], + "angle": 0, + "content": "[551] J. Huang, H. Shao, and K. C.-C. Chang, \"Are large pre-trained language models leaking your personal information?\" arXiv preprint arXiv:2205.12628, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.899, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[552] Z. Zhang, J. Wen, and M. Huang, \"Ethicist: Targeted training data extraction through loss smoothed soft prompting and calibrated confidence estimation,\"" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.806, + 0.069 + ], + "angle": 0, + "content": "arXiv preprint arXiv:2307.04401, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.069, + 0.923, + 0.127 + ], + "angle": 0, + "content": "[553] K. K. Nakka, A. Frikha, R. Mendes, X. Jiang, and X. Zhou, \"Pii-compass: Guiding llm training data extraction prompts towards the target pii via grounding,\" arXiv preprint arXiv:2407.02943, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.127, + 0.923, + 0.186 + ], + "angle": 0, + "content": "[554] Z. Wang, R. Bao, Y. Wu, J. Taylor, C. Xiao, F. Zheng, W. Jiang, S. Gao, and Y. Zhang, \"Unlocking memorization in large language models with dynamic soft prompting,\" arXiv preprint arXiv:2409.13853, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.186, + 0.923, + 0.243 + ], + "angle": 0, + "content": "[555] J. G. Wang, J. Wang, M. Li, and S. Neel, \"Pandora's white-box: Precise training data detection and extraction in large language models,\" arXiv preprint arXiv:2402.17012, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.243, + 0.923, + 0.287 + ], + "angle": 0, + "content": "[556] Z. Sha and Y. 
Zhang, \"Prompt stealing attacks against large language models,\" arXiv preprint arXiv:2402.12959, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.287, + 0.923, + 0.33 + ], + "angle": 0, + "content": "[557] C. Zhang, J. X. Morris, and V. Shmatikov, \"Extracting prompts by inverting llm outputs,\" arXiv preprint arXiv:2405.15012, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.33, + 0.923, + 0.389 + ], + "angle": 0, + "content": "[558] Y. Yang, C. Li, Y. Jiang, X. Chen, H. Wang, X. Zhang, Z. Wang, and S. Ji, \"Prsa: Prompt stealing attacks against large language models,\" arXiv preprint arXiv:2402.19200, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.389, + 0.923, + 0.476 + ], + "angle": 0, + "content": "[559] Y. Zeng, H. Lin, J. Zhang, D. Yang, R. Jia, and W. Shi, \"How johnny can persuade llms to jailbreak them: Rethinking persuasion to challenge ai safety by humanizing llms,\" in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2024, pp. 14322-14350." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.476, + 0.923, + 0.564 + ], + "angle": 0, + "content": "[560] X. Shen, Z. Chen, M. Backes, Y. Shen, and Y. Zhang, \"do anything now\": Characterizing and evaluating in-the-wild jailbreak prompts on large language models,\" in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 1671-1685." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.565, + 0.923, + 0.622 + ], + "angle": 0, + "content": "[561] Z. Wang, W. Xie, B. Wang, E. Wang, Z. Gui, S. Ma, and K. Chen, \"Foot in the door: Understanding large language model jailbreaking via cognitive psychology,\" arXiv preprint arXiv:2402.15690, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.622, + 0.923, + 0.71 + ], + "angle": 0, + "content": "[562] M. Samvelyan, S. C. Raparthy, A. Lupu, E. Hambro, A. Markosyan, M. Bhatt, Y. Mao, M. Jiang, J. 
Parker-Holder, J. Foerster et al., \"Rainbow teaming: Open-ended generation of diverse adversarial prompts,\" Advances in Neural Information Processing Systems, vol. 37, pp. 69747-69786, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.71, + 0.923, + 0.768 + ], + "angle": 0, + "content": "[563] H. Jin, R. Chen, A. Zhou, Y. Zhang, and H. Wang, \"Guard: Role-playing to generate natural-language jailbreakings to test guideline adherence of large language models,\" arXiv preprint arXiv:2402.03299, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.768, + 0.923, + 0.825 + ], + "angle": 0, + "content": "[564] Y. Yuan, W. Jiao, W. Wang, J.-t. Huang, P. He, S. Shi, and Z. Tu, \"Gpt-4 is too smart to be safe: Stealthy chat with llms via cipher,\" arXiv preprint arXiv:2308.06463, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.826, + 0.923, + 0.899 + ], + "angle": 0, + "content": "[565] H. Lv, X. Wang, Y. Zhang, C. Huang, S. Dou, J. Ye, T. Gui, Q. Zhang, and X. Huang, \"Codechameleon: Personalized encryption framework for jailbreaking large language models,\" arXiv preprint arXiv:2402.16717, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.899, + 0.923, + 0.944 + ], + "angle": 0, + "content": "[566] F. Jiang, Z. Xu, L. Niu, Z. Xiang, B. Ramasubramanian, B. Li, and R. Poovendran, \"Artprompt: Ascii art-based jailbreak attacks against aligned llms,\" in Proceedings" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "56" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.49, + 0.097 + ], + "angle": 0, + "content": "of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2024, pp. 15 157-15 173." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.098, + 0.492, + 0.169 + ], + "angle": 0, + "content": "[567] C. Anil, E. Durmus, N. Panickssery, M. Sharma, J. Benton, S. Kundu, J. Batson, M. Tong, J. Mu, D. Ford et al., \"Many-shot jailbreaking,\" Advances in Neural Information Processing Systems, vol. 37, pp. 129-696-129742, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.17, + 0.492, + 0.213 + ], + "angle": 0, + "content": "[568] Z.-X. Yong, C. Menghini, and S. H. Bach, \"Low-resource languages jailbreak gpt-4,\" arXiv preprint arXiv:2310.02446, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.214, + 0.492, + 0.301 + ], + "angle": 0, + "content": "[569] W. Wang, Z. Tu, C. Chen, Y. Yuan, J.-T. Huang, W. Jiao, and M. R. Lyu, \"All languages matter: On the multilingual safety of llms,\" in Annual Meeting of the Association for Computational Linguistics, 2024. [Online]. Available: https://api-semanticscholar.org/ CorpusID:271931322" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.302, + 0.492, + 0.359 + ], + "angle": 0, + "content": "[570] Z. Wei, Y. Wang, A. Li, Y. Mo, and Y. Wang, \"Jailbreak and guard aligned language models with only few in-context demonstrations,\" arXiv preprint arXiv:2310.06387, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.36, + 0.492, + 0.417 + ], + "angle": 0, + "content": "[571] N. Xu, F. Wang, B. Zhou, B. Z. Li, C. Xiao, and M. Chen, \"Cognitive overload: Jailbreaking large language models with overloaded logical thinking,\" arXiv preprint arXiv:2311.09827, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.418, + 0.492, + 0.476 + ], + "angle": 0, + "content": "[572] P. Ding, J. Kuang, D. Ma, X. Cao, Y. Xian, J. Chen, and S. Huang, \"A wolf in sheep's clothing: Generalized nested jailbreak prompts can fool large language models easily,\" arXiv preprint arXiv:2311.08268, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.477, + 0.492, + 0.52 + ], + "angle": 0, + "content": "[573] B. Upadhayay and V. Behzadan, \"Sandwich attack: Multi-language mixture adaptive attack on llms,\" arXiv preprint arXiv:2404.07242, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.521, + 0.492, + 0.608 + ], + "angle": 0, + "content": "[574] D. Yao, J. Zhang, I. G. Harris, and M. Carlsson, \"Fuzzllm: A novel and universal fuzzing framework for proactively discovering jailbreak vulnerabilities in large language models,\" in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 4485-4489." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.608, + 0.492, + 0.68 + ], + "angle": 0, + "content": "[575] B. Li, H. Xing, C. Huang, J. Qian, H. Xiao, L. Feng, and C. Tian, \"Structuralsleight: Automated jailbreak attacks on large language models utilizing uncommon text-encoded structure,\" arXiv e-prints, pp. arXiv-2406, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.681, + 0.492, + 0.736 + ], + "angle": 0, + "content": "[576] A. Paulus, A. Zharmagambetov, C. Guo, B. Amos, and Y. Tian, \"Advprompter: Fast adaptive adversarial prompting for llms,\" arXiv preprint arXiv:2404.16873, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.738, + 0.492, + 0.796 + ], + "angle": 0, + "content": "[577] A. Wei, N. Haghtalab, and J. Steinhardt, \"Jailbroken: How does llm safety training fail?\" Advances in Neural Information Processing Systems, vol. 36, pp. 80079-80110, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.798, + 0.492, + 0.87 + ], + "angle": 0, + "content": "[578] Z. Chen, Z. Zhao, W. Qu, Z. Wen, Z. Han, Z. Zhu, J. Zhang, and H. Yao, \"Pandora: Detailed llm jailbreaking via collaborated phishing agents with decomposed reasoning,\" in ICLR 2024 Workshop on Secure and Trustworthy Large Language Models, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.871, + 0.492, + 0.928 + ], + "angle": 0, + "content": "[579] E. Perez, S. Huang, F. Song, T. Cai, R. Ring, J. Aslanides, A. Glaese, N. McAleese, and G. Irving, \"Red teaming language models with language models,\" arXiv preprint arXiv:2202.03286, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.929, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[580] R. Shah, S. Pour, A. Tagade, S. Casper, J. Rando et al.," + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.923, + 0.097 + ], + "angle": 0, + "content": "\"Scalable and transferable black-box jailbreaks for language models via persona modulation,\" arXiv preprint arXiv:2311.03348, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.098, + 0.923, + 0.141 + ], + "angle": 0, + "content": "[581] X. Guo, F. Yu, H. Zhang, L. Qin, and B. Hu, \"Coldattack: Jailbreaking lms with stealthiness and controllability,\" arXiv preprint arXiv:2402.08679, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.142, + 0.923, + 0.198 + ], + "angle": 0, + "content": "[582] J. Yu, H. Luo, J. Y.-C. Hu, W. Guo, H. Liu, and X. Xing, \"Enhancing jailbreak attack against large language models through silent tokens,\" arXiv preprint arXiv:2405.20653, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.2, + 0.923, + 0.258 + ], + "angle": 0, + "content": "[583] Z.-W. Hong, I. Shenfeld, T.-H. Wang, Y.-S. Chuang, A. Pareja, J. Glass, A. Srivastava, and P. 
Agrawal, \"Curiosity-driven red-teaming for large language models,\" arXiv preprint arXiv:2402.19464, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.259, + 0.923, + 0.331 + ], + "angle": 0, + "content": "[584] X. Zheng, T. Pang, C. Du, Q. Liu, J. Jiang, and M. Lin, \"Improved few-shot jailbreaking can circumvent aligned language models and their defenses,\" Advances in Neural Information Processing Systems, vol. 37, pp. 32-856-32-887, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.332, + 0.923, + 0.374 + ], + "angle": 0, + "content": "[585] Z. Xiao, Y. Yang, G. Chen, and Y. Chen, \"Distract large language models for automatic jailbreak attack,\" arXiv preprint arXiv:2403.08424, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.375, + 0.923, + 0.432 + ], + "angle": 0, + "content": "[586] Z. Chang, M. Li, Y. Liu, J. Wang, Q. Wang, and Y. Liu, \"Play guessing game with llm: Indirect jailbreak attack with implicit clues,\" arXiv preprint arXiv:2402.09091, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.433, + 0.923, + 0.49 + ], + "angle": 0, + "content": "[587] J. Yu, X. Lin, Z. Yu, and X. Xing, \"Gptfuzzer: Red teaming large language models with auto-generated jailbreak prompts,\" arXiv preprint arXiv:2309.10253, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.491, + 0.923, + 0.563 + ], + "angle": 0, + "content": "[588] W. Jiang, Z. Wang, J. Zhai, S. Ma, Z. Zhao, and C. Shen, \"Unlocking adversarial suffix optimization without affirmative phrases: Efficient black-box jailbreaking via llm as optimizer,\" arXiv preprint arXiv:2408.11313, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.565, + 0.923, + 0.607 + ], + "angle": 0, + "content": "[589] J. Zhang, Z. Wang, R. Wang, X. Ma, and Y.-G. Jiang, \"Enja: Ensemble jailbreak on large language models,\" arXiv preprint arXiv:2408.03603, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.608, + 0.923, + 0.651 + ], + "angle": 0, + "content": "[590] X. Zhao, X. Yang, T. Pang, C. Du, L. Li, Y.-X. Wang, and W. Y. Wang, \"Weak-to-strong jailbreaking on large language models,\" arXiv preprint arXiv:2401.17256, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.652, + 0.923, + 0.695 + ], + "angle": 0, + "content": "[591] B. Upadhayay, V. Behzadan, and A. Karbasi, \"Cognitive overload attack: Prompt injection for long context,\" arXiv preprint arXiv:2410.11272, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.696, + 0.923, + 0.751 + ], + "angle": 0, + "content": "[592] H. Kwon and W. Pak, \"Text-based prompt injection attack using mathematical functions in modern large language models,\" *Electronics*, vol. 13, no. 24, p. 5008, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.753, + 0.923, + 0.812 + ], + "angle": 0, + "content": "[593] E. Bagdasaryan, T.-Y. Hsieh, B. Nassi, and V. Shmatikov, \"Abusing images and sounds for indirect instruction injection in multi-modal llms,\" arXiv preprint arXiv:2307.10490, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.813, + 0.923, + 0.883 + ], + "angle": 0, + "content": "[594] D. Pasquini, M. Strohmeier, and C. Troncoso, \"Neural exec: Learning (and learning from) execution triggers for prompt injection attacks,\" in Proceedings of the 2024 Workshop on Artificial Intelligence and Security, 2024, pp. 89-100." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.885, + 0.923, + 0.928 + ], + "angle": 0, + "content": "[595] Z. Shao, H. Liu, J. Mu, and N. Z. Gong, \"Making llms vulnerable to prompt injection via poisoning alignment,\" arXiv preprint arXiv:2410.14827, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.929, + 0.923, + 0.943 + ], + "angle": 0, + "content": "[596] Y. Yang, H. Yao, B. Yang, Y. He, Y. Li, T. 
Zhang," + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.033, + 0.923, + 0.043 + ], + "angle": 0, + "content": "57" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.054, + 0.492, + 0.098 + ], + "angle": 0, + "content": "Z. Qin, and K. Ren, \"Tapi: Towards target-specific and adversarial prompt injection against code llms,\" arXiv preprint arXiv:2407.09164, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.098, + 0.492, + 0.142 + ], + "angle": 0, + "content": "[597] Y. Ren, \"F2a: An innovative approach for prompt injection by utilizing feign security detection agents,\" arXiv preprint arXiv:2410.08776, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.142, + 0.492, + 0.2 + ], + "angle": 0, + "content": "[598] R. Pedro, D. Castro, P. Carreira, and N. Santos, \"From prompt injections to sql injection attacks: How protected is your llm-integrated web application?\" arXiv preprint arXiv:2308.01990, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.2, + 0.492, + 0.258 + ], + "angle": 0, + "content": "[599] Y. Lee, T. Park, Y. Lee, J. Gong, and J. Kang, \"Exploring potential prompt injection attacks in federated military Ilms and their mitigation,\" arXiv preprint arXiv:2501.18416, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.259, + 0.492, + 0.302 + ], + "angle": 0, + "content": "[600] D. Lee and M. Tiwari, \"Prompt infection: Llm-to-llm prompt injection within multi-agent systems,\" arXiv preprint arXiv:2410.07283, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.302, + 0.492, + 0.387 + ], + "angle": 0, + "content": "[601] W. Zhang, X. Kong, C. Dewitt, T. Braunl, and J. B. 
Hong, \"A study on prompt injection attack against lvm-integrated mobile robotic systems,\" in 2024 IEEE 35th International Symposium on Software Reliability Engineering Workshops (ISSREW). IEEE, 2024, pp. 361-368." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.389, + 0.492, + 0.447 + ], + "angle": 0, + "content": "[602] W. Meng, Z. Guo, L. Wu, C. Gong, W. Liu, W. Li, C. Wei, and W. Chen, \"Rr: Unveiling llm training privacy through recollection and ranking,\" arXiv preprint arXiv:2502.12658, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.448, + 0.492, + 0.506 + ], + "angle": 0, + "content": "[603] B. Jayaraman, E. Ghosh, H. Inan, M. Chase, S. Roy, and W. Dai, \"Active data pattern extraction attacks on generative language models,\" arXiv preprint arXiv:2207.10802, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.506, + 0.492, + 0.579 + ], + "angle": 0, + "content": "[604] Z. Zeng, T. Xiang, S. Guo, J. He, Q. Zhang, G. Xu, and T. Zhang, \"Contrast-then-approximate: Analyzing keyword leakage of generative language models,\" IEEE Transactions on Information Forensics and Security, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.579, + 0.492, + 0.65 + ], + "angle": 0, + "content": "[605] C. Jiang, X. Pan, G. Hong, C. Bao, and M. Yang, \"Rag-thief: Scalable extraction of private data from retrieval-augmented generation applications with agent-based attacks,\" arXiv preprint arXiv:2411.14110, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.651, + 0.492, + 0.71 + ], + "angle": 0, + "content": "[606] Z. Qi, H. Zhang, E. Xing, S. Kakade, and H. Lakkaraju, \"Follow my instruction and spill the beans: Scalable data extraction from retrieval-augmented generation systems,\" arXiv preprint arXiv:2402.17840, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.71, + 0.492, + 0.782 + ], + "angle": 0, + "content": "[607] S. Zeng, J. Zhang, P. He, Y. Xing, Y. Liu, H. Xu, J. Ren, S. Wang, D. Yin, Y. 
Chang et al., \"The good and the bad: Exploring privacy issues in retrieval-augmented generation (rag),\" arXiv preprint arXiv:2402.16893, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.782, + 0.492, + 0.827 + ], + "angle": 0, + "content": "[608] Y. Peng, J. Wang, H. Yu, and A. Houmansadr, \"Data extraction attacks in retrieval-augmented generation via backdoors,\" arXiv preprint arXiv:2411.01705, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.827, + 0.492, + 0.884 + ], + "angle": 0, + "content": "[609] A. Panda, C. A. Choquette-Choo, Z. Zhang, Y. Yang, and P. Mittal, \"Teach llms to phish: Stealing private information from language models,\" arXiv preprint arXiv:2403.00871, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.884, + 0.492, + 0.929 + ], + "angle": 0, + "content": "[610] L. Lu, Z. Zuo, Z. Sheng, and P. Zhou, “Merger-as-a-stealer: Stealing targeted pii from aligned llms with model merging,” arXiv preprint arXiv:2502.16094, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.929, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[611] X. Chen, S. Tang, R. Zhu, S. Yan, L. Jin, Z. Wang, L. Su," + }, + { + "type": "list", + "bbox": [ + 0.076, + 0.054, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.054, + 0.923, + 0.128 + ], + "angle": 0, + "content": "Z. Zhang, X. Wang, and H. Tang, \"The janus interface: How fine-tuning in large language models amplifies the privacy risks,\" in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 1285-1299." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.128, + 0.923, + 0.157 + ], + "angle": 0, + "content": "[612] R. Panchendrarajan and S. Bhoi, \"Dataset reconstruction attack against language models,\" 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.157, + 0.923, + 0.214 + ], + "angle": 0, + "content": "[613] M. R. U. Rashid, V. A. 
Dasu, K. Gu, N. Sultana, and S. Mehnaz, \"Fltrojan: Privacy leakage attacks against federated language models through selective weight tampering,\" arXiv preprint arXiv:2310.16152, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.214, + 0.923, + 0.273 + ], + "angle": 0, + "content": "[614] J. Dentan, A. Paran, and A. Shabou, \"Reconstructing training data from document understanding models,\" in 33rd USENIX Security Symposium (USENIX Security 24), 2024, pp. 6813-6830." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.273, + 0.923, + 0.346 + ], + "angle": 0, + "content": "[615] J. Hósciłowicz, P. Popiołek, J. Rudkowski, J. Bieniasz, and A. Janicki, \"Unconditional token forcing: Extracting text hidden within llm,\" in 2024 19th Conference on Computer Science and Intelligence Systems (FedCSIS). IEEE, 2024, pp. 621-624." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.346, + 0.923, + 0.404 + ], + "angle": 0, + "content": "[616] A. Al-Kaswan, M. Izadi, and A. Van Deursen, \"Traces of memorisation in large language models for code,\" in Proceedings of the IEEE/ACM 46th International Conference on Software Engineering, 2024, pp. 1-12." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.404, + 0.923, + 0.462 + ], + "angle": 0, + "content": "[617] Y. Nie, C. Wang, K. Wang, G. Xu, G. Xu, and H. Wang, \"Decoding secret memorization in code llms through token-level characterization,\" arXiv preprint arXiv:2410.08858, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.462, + 0.923, + 0.506 + ], + "angle": 0, + "content": "[618] E. Lehman, S. Jain, K. Pichotta, Y. Goldberg, and B. C. Wallace, \"Does bert pretrained on clinical notes reveal sensitive data?\" arXiv preprint arXiv:2104.07762, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.506, + 0.923, + 0.579 + ], + "angle": 0, + "content": "[619] A. Diera, N. Lell, A. Garifullina, and A. 
Scherp, \"Memorization of named entities in fine-tuned bert models,\" in International Cross-Domain Conference for Machine Learning and Knowledge Extraction. Springer, 2023, pp. 258-279." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.579, + 0.923, + 0.637 + ], + "angle": 0, + "content": "[620] R. Zhang, S. Hidano, and F. Koushanfar, \"Text re- vealer: Private text reconstruction via model inversion attacks against transformers,\" arXiv preprint arXiv:2209.10505, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.637, + 0.923, + 0.71 + ], + "angle": 0, + "content": "[621] Y. Huang, Y. Li, W. Wu, J. Zhang, and M. R. Lyu, \"Your code secret belongs to me: neural code completion tools can memorize hard-coded credentials,\" Proceedings of the ACM on Software Engineering, vol. 1, no. FSE, pp. 2515-2537, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.71, + 0.923, + 0.753 + ], + "angle": 0, + "content": "[622] T. Tiwari and G. E. Suh, \"Sequence-level analysis of leakage risk of training data in large language models,\" arXiv preprint arXiv:2412.11302, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.753, + 0.923, + 0.812 + ], + "angle": 0, + "content": "[623] H. Shao, J. Huang, S. Zheng, and K. C.-C. Chang, \"Quantifying association capabilities of large language models and its implications on privacy leakage,\" arXiv preprint arXiv:2305.12707, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.812, + 0.923, + 0.856 + ], + "angle": 0, + "content": "[624] Y. More, P. Ganesh, and G. Farnadi, \"Towards more realistic extraction attacks: An adversarial perspective,\" arXiv preprint arXiv:2407.02596, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.856, + 0.923, + 0.913 + ], + "angle": 0, + "content": "[625] R. Staab, M. Vero, M. Balunović, and M. Vechev, \"Beyond memorization: Violating privacy via inference with large language models,\" arXiv preprint arXiv:2310.07298, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.913, + 0.923, + 0.944 + ], + "angle": 0, + "content": "[626] H. Xu, Z. Zhang, X. Yu, Y. Wu, Z. Zha, B. Xu, W. Xu, M. Hu, and K. Peng, \"Targeted training data extrac" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "58" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.492, + 0.098 + ], + "angle": 0, + "content": "tion—neighborhood comparison-based membership inference attacks in large language models,\" Applied Sciences, vol. 14, no. 16, p. 7118, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.098, + 0.492, + 0.141 + ], + "angle": 0, + "content": "[627] A. Karamolegkou, J. Li, L. Zhou, and A. Søgaard, \"Copyright violations and large language models,\" arXiv preprint arXiv:2310.13771, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.142, + 0.492, + 0.199 + ], + "angle": 0, + "content": "[628] X. Zheng, H. Han, S. Shi, Q. Fang, Z. Du, X. Hu, and Q. Guo, \"Inputsnatch: Stealing input in llm services via timing side-channel attacks,\" arXiv preprint arXiv:2411.18191, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.2, + 0.492, + 0.257 + ], + "angle": 0, + "content": "[629] Y. Dong, R. Mu, G. Jin, Y. Qi, J. Hu, X. Zhao, J. Meng, W. Ruan, and X. Huang, \"Building guardrails for large language models,\" arXiv preprint arXiv:2402.01822, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.259, + 0.492, + 0.33 + ], + "angle": 0, + "content": "[630] N. Jain, A. Schwarzschild, Y. Wen, G. Somepalli, J. Kirchenbauer, P. yeh Chiang, M. Goldblum, A. Saha, J. Geiping, and T. 
Goldstein, \"Baseline defenses for adversarial attacks against aligned language models,\" 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.331, + 0.492, + 0.403 + ], + "angle": 0, + "content": "[631] H. Lin, Y. Lao, T. Geng, T. Yu, and W. Zhao, \"Uniguardian: A unified defense for detecting prompt injection, backdoor attacks and adversarial attacks in large language models,\" arXiv preprint arXiv:2502.13141, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.403, + 0.492, + 0.476 + ], + "angle": 0, + "content": "[632] Z. Hu, G. Wu, S. Mitra, R. Zhang, T. Sun, H. Huang, and V. Swaminathan, \"Token-level adversarial prompt detection based on perplexity measures and contextual information,\" in ICLR 2025 Workshop on Building Trust in Language Models and Applications, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.477, + 0.492, + 0.55 + ], + "angle": 0, + "content": "[633] Y. Gou, K. Chen, Z. Liu, L. Hong, H. Xu, Z. Li, D.-Y. Yeung, J. T. Kwok, and Y. Zhang, \"Eyes closed, safety on: Protecting multimodal llms via image-to-text transformation,\" in European Conference on Computer Vision, 2024, pp. 388-404." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.55, + 0.492, + 0.607 + ], + "angle": 0, + "content": "[634] S. Armstrong, M. Franklin, C. Stevens, and R. Gorman, \"Defense against the dark prompts: Mitigating best-of-n jailbreaking with prompt evaluation,\" arXiv preprint arXiv:2107.03374, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.608, + 0.492, + 0.695 + ], + "angle": 0, + "content": "[635] Y. Xie, M. Fang, R. Pi, and N. Gong, \"GradSafe: Detecting jailbreak prompts for LLMs via safety-critical gradient analysis,\" in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), L.-W. Ku, A. Martins, and V. Srikumar, Eds., 2024, pp. 507-518." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.695, + 0.492, + 0.753 + ], + "angle": 0, + "content": "[636] B. Peng, Z. Bi, Q. Niu, M. Liu, P. Feng, T. Wang, L. K. Yan, Y. Wen, Y. Zhang, and C. H. Yin, \"Jailbreaking and mitigation of vulnerabilities in large language models,\" arXiv preprint arXiv:2410.15236, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.754, + 0.492, + 0.812 + ], + "angle": 0, + "content": "[637] A. Kumar, C. Agarwal, S. Srinivas, A. J. Li, S. Feizi, and H. Lakkaraju, \"Certifying LLM safety against adversarial prompting,\" in First Conference on Language Modeling, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.812, + 0.492, + 0.87 + ], + "angle": 0, + "content": "[638] X. Zhang, C. Zhang, T. Li, Y. Huang, X. Jia, M. Hu, J. Zhang, Y. Liu, S. Ma, and C. Shen, \"Jailguard: A universal detection framework for llm prompt-based attacks,\" arXiv preprint arXiv:2312.10766, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.87, + 0.492, + 0.928 + ], + "angle": 0, + "content": "[639] Y. Liu, Y. Jia, R. Geng, J. Jia, and N. Z. Gong, \"Formalizing and benchmarking prompt injection attacks and defenses,\" in Proceedings of the 33rd USENIX Conference on Security Symposium, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.929, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[640] X. Suo, \"Signed-prompt: A new approach to prevent" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.923, + 0.098 + ], + "angle": 0, + "content": "prompt injection attacks against llm-integrated applications,\" in AIP Conference Proceedings, vol. 3194, no. 1. AIP Publishing, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.098, + 0.923, + 0.17 + ], + "angle": 0, + "content": "[641] L. Yan, Z. Zhang, G. Tao, K. Zhang, X. Chen, G. Shen, and X. 
Zhang, \"Parafuzz: An interpretability-driven technique for detecting poisoned samples in nlp,\" Advances in Neural Information Processing Systems, vol. 36, pp. 66755-66767, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.17, + 0.923, + 0.242 + ], + "angle": 0, + "content": "[642] X. Hu, P.-Y. Chen, and T.-Y. Ho, \"Gradient cuff: Detecting jailbreak attacks on large language models by exploring refusal loss landscapes,\" in Advances in Neural Information Processing Systems, vol. 37, 2024, pp. 126-265-126-296." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.243, + 0.923, + 0.273 + ], + "angle": 0, + "content": "[643] G. Alon and M. J. Kamfonas, \"Detecting language model attacks with perplexity,\" 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.273, + 0.923, + 0.33 + ], + "angle": 0, + "content": "[644] J. Ji, B. Hou, A. Robey, G. J. Pappas, H. Hassani, Y. Zhang, E. Wong, and S. Chang, \"Defending large language models against jailbreak attacks via semantic smoothing,\" CoRR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.33, + 0.923, + 0.389 + ], + "angle": 0, + "content": "[645] M. Phute, A. Helbling, M. Hull, S. Peng, S. Szyller, C. Cornelius, and D. H. Chau, \"Llm self defense: By self examination, llms know they are being tricked,\" arXiv preprint arXiv:2308.07308, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.39, + 0.923, + 0.447 + ], + "angle": 0, + "content": "[646] L. N. Candogan, Y. Wu, E. A. Rocamora, G. G. Chrysos, and V. Cevher, \"Single-pass detection of jailbreaking input in large language models,\" arXiv preprint arXiv:2502.15435, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.448, + 0.923, + 0.535 + ], + "angle": 0, + "content": "[647] B. Cao, Y. Cao, L. Lin, and J. Chen, “Defending against alignment-breaking attacks via robustly aligned LLM,” in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), L.-W. 
Ku, A. Martins, and V. Srikumar, Eds., 2024, pp. 10542-10560." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.535, + 0.923, + 0.594 + ], + "angle": 0, + "content": "[648] Y. Zhang, L. Ding, L. Zhang, and D. Tao, \"Intention analysis makes LLMs a good jailbreak defender,\" in Proceedings of the 31st International Conference on Computational Linguistics, 2025, pp. 2947-2968." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.594, + 0.923, + 0.679 + ], + "angle": 0, + "content": "[649] S. Han, K. Rao, A. Ettinger, L. Jiang, B. Y. Lin, N. Lambert, Y. Choi, and N. Dziri, \"Wildguard: Open one-stop moderation tools for safety risks, jailbreaks, and refusals of llms,\" in The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.68, + 0.923, + 0.738 + ], + "angle": 0, + "content": "[650] M. Pisano, P. Ly, A. Sanders, B. Yao, D. Wang, T. Strzalkowski, and M. Si, \"Bergeron: Combating adversarial attacks through a conscience-based alignment framework,\" arXiv preprint arXiv:2312.00029, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.739, + 0.923, + 0.796 + ], + "angle": 0, + "content": "[651] A. Robey, E. Wong, H. Hassani, and G. J. Pappas, \"Smoothllm: Defending large language models against jailbreaking attacks,\" arXiv preprint arXiv:2310.03684, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.798, + 0.923, + 0.899 + ], + "angle": 0, + "content": "[652] J. Ji, B. Hou, Z. Zhang, G. Zhang, W. Fan, Q. Li, Y. Zhang, G. Liu, S. Liu, and S. Chang, \"Advancing the robustness of large language models through self-denoised smoothing,\" in Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 2: Short Papers), 2024, pp. 246-257." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.9, + 0.923, + 0.944 + ], + "angle": 0, + "content": "[653] J. 
Yi, Y. Xie, B. Zhu, K. Hines, E. Kiciman, G. Sun, X. Xie, and F. Wu, \"Benchmarking and defending against indirect prompt injection attacks on large lan" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "59" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.314, + 0.069 + ], + "angle": 0, + "content": "guage models,\" CoRR, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.069, + 0.492, + 0.127 + ], + "angle": 0, + "content": "[654] X. Song, S. Duan, and G. Liu, \"Alis: Aligned llm instruction security strategy for unsafe input prompt,\" in Proceedings of the 31st International Conference on Computational Linguistics, 2025, pp. 9124-9146." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.127, + 0.492, + 0.199 + ], + "angle": 0, + "content": "[655] Y. Wang, Z. Shi, A. Bai, and C.-J. Hsieh, \"Defending Ilms against jailbreaking attacks via backtranslation,\" in Findings of the Association for Computational Linguistics: ACL 2024, L.-W. Ku, A. Martins, and V. Srikumar, Eds., 2024, pp. 16031-16046." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.199, + 0.492, + 0.243 + ], + "angle": 0, + "content": "[656] E. Zverev, S. Abdelnabi, M. Fritz, and C. H. Lampert, \"Can LLMs separate instructions from data? and what do we even mean by that?\" CoRR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.243, + 0.492, + 0.3 + ], + "angle": 0, + "content": "[657] Y. Dong, R. Mu, G. Jin, Y. Qi, J. Hu, X. Zhao, J. Meng, W. Ruan, and X. Huang, \"Building guardrails for large language models,\" arXiv preprint arXiv:2402.01822, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.301, + 0.492, + 0.375 + ], + "angle": 0, + "content": "[658] D. Kumar, Y. A. AbuHashem, and Z. Durmeric, \"Watch your language: Investigating content moderation with large language models,\" in Proceedings of the International AAAI Conference on Web and Social Media, vol. 18, 2024, pp. 865-878." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.375, + 0.492, + 0.462 + ], + "angle": 0, + "content": "[659] T. Rebedea, R. Dinu, M. N. Sreedhar, C. Parisien, and J. Cohen, \"Nemo guardrails: A toolkit for controllable and safe llm applications with programmable rails,\" in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, 2023, pp. 431-445." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.462, + 0.492, + 0.52 + ], + "angle": 0, + "content": "[660] OpenAI, \"Improving model safety behavior with rule-based rewards,\" https://openai.com/index/improving-model-safety-behavior-with-rule-based-re 2025, accessed: 2025-03-24." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.521, + 0.492, + 0.579 + ], + "angle": 0, + "content": "[661] H. Ma, C. Zhang, H. Fu, P. Zhao, and B. Wu, \"Adapting large language models for content moderation: Pitfalls in data engineering and supervised fine-tuning,\" arXiv preprint arXiv:2310.03400, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.579, + 0.492, + 0.637 + ], + "angle": 0, + "content": "[662] M. Phute, A. Helbling, M. Hull, S. Peng, S. Szyller, C. Cornelius, and D. H. Chau, \"Llm self defense: By self examination, llms know they are being tricked,\" arXiv preprint arXiv:2308.07308, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.637, + 0.492, + 0.695 + ], + "angle": 0, + "content": "[663] Z. Gou, Z. Shao, Y. Gong, Y. Shen, Y. Yang, N. Duan, and W. Chen, \"Critic: Large language models can self-correct with tool-interactive critiquing,\" arXiv preprint arXiv:2305.11738, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.695, + 0.492, + 0.768 + ], + "angle": 0, + "content": "[664] C. Lu, S. Holt, C. Fanconi, A. J. Chan, J. Foerster, M. van der Schaar, and R. T. Lange, \"Discovering preference optimization algorithms with and for large language models,\" in Advances in Neural Information Processing Systems, vol. 37, 2024, pp. 86528-86573." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.768, + 0.492, + 0.842 + ], + "angle": 0, + "content": "[665] A. Madaan, N. Tandon, P. Gupta, S. Hallinan, L. Gao, S. Wiegreffe, U. Alon, N. Dziri, S. Prabhumoye, Y. Yang et al., \"Self-refine: Iterative refinement with self-feedback,\" Advances in Neural Information Processing Systems, vol. 36, pp. 46534-46594, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.842, + 0.492, + 0.927 + ], + "angle": 0, + "content": "[666] D. Jiang, X. Ren, and B. Y. Lin, \"Llm-blender: Ensemble large language models with pairwise ranking and generative fusion,\" in Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2023, pp. 14165-14178." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.928, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[667] Z. Lai, X. Zhang, and S. Chen, \"Adaptive ensembles" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.923, + 0.099 + ], + "angle": 0, + "content": "of fine-tuned transformers for llm-generated text detection,\" in 2024 International Joint Conference on Neural Networks. IEEE, 2024, pp. 1-7." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.098, + 0.923, + 0.155 + ], + "angle": 0, + "content": "[668] C. Xiong, X. Qi, P.-Y. Chen, and T.-Y. Ho, \"Defensive prompt patch: A robust and interpretable defense of llms against jailbreak attacks,\" arXiv preprint arXiv:2405.20099, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.155, + 0.923, + 0.214 + ], + "angle": 0, + "content": "[669] Z. Zhang, Q. Zhang, and J. Foerster, “Parden, can you repeat that? defending against jailbreaks via repetition,” in Proceedings of the 41st International Conference on Machine Learning, 2024, pp. 60271-60287." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.214, + 0.923, + 0.287 + ], + "angle": 0, + "content": "[670] Z. Yuan, Z. Xiong, Y. Zeng, N. Yu, R. Jia, D. Song, and B. Li, \"Rigorllm: resilient guardrails for large language models against undesired content,\" in Proceedings of the 41st International Conference on Machine Learning, 2024, pp. 57-953-57-965." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.287, + 0.923, + 0.346 + ], + "angle": 0, + "content": "[671] M. Cao, M. Fatemi, J. C. Cheung, and S. Shabanian, \"Systematic rectification of language models via dead-end analysis,\" in The Eleventh International Conference on Learning Representations, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.346, + 0.923, + 0.403 + ], + "angle": 0, + "content": "[672] F. Faal, K. Schmitt, and J. Y. Yu, \"Reward modeling for mitigating toxicity in transformer-based language models,\" Applied Intelligence, vol. 53, no. 7, p. 8421-8435, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.403, + 0.923, + 0.476 + ], + "angle": 0, + "content": "[673] W. Zeng, Y. Liu, R. Mullins, L. Peran, J. Fernandez, H. Harkous, K. Narasimhan, D. Proud, P. Kumar, B. Radharapu et al., \"Shieldgemma: Generative ai content moderation based on gemma,\" arXiv preprint arXiv:2407.21772, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.476, + 0.923, + 0.55 + ], + "angle": 0, + "content": "[674] Z. Wang, F. Yang, L. Wang, P. Zhao, H. Wang, L. Chen, *ards/, Q. Lin, and K.-F. 
Wong, \"SELF-GUARD: Empower the LLM to safeguard itself,\" in *Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics*, 2024, pp. 1648-1668." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.55, + 0.923, + 0.607 + ], + "angle": 0, + "content": "[675] S. Ghosh, P. Varshney, E. Galinkin, and C. Parisien, \"Aegis: Online adaptive ai content safety moderation with ensemble of llm experts,\" arXiv preprint arXiv:2404.05993, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.608, + 0.923, + 0.709 + ], + "angle": 0, + "content": "[676] W. Wang, J.-T. Huang, W. Wu, J. Zhang, Y. Huang, S. Li, P. He, and M. R. Lyu, \"Mttm: Metamorphic testing for textual content moderation software,\" 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE), pp. 2387-2399, 2023. [Online]. Available: https://api-semanticscholar.org/ CorpusID:256826966" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.709, + 0.923, + 0.753 + ], + "angle": 0, + "content": "[677] K.-L. Chiu, A. Collins, and R. Alexander, \"Detecting hate speech with gpt-3,\" arXiv preprint arXiv:2103.12407, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.753, + 0.923, + 0.811 + ], + "angle": 0, + "content": "[678] J. Kim, A. Derakhshan, and I. G. Harris, \"Robust safety classifier for large language models: Adversarial prompt shield,\" arXiv preprint arXiv:2311.00172, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.811, + 0.923, + 0.885 + ], + "angle": 0, + "content": "[679] B. Krause, A. D. Gotmare, B. McCann, N. S. Keskar, S. Joty, R. Socher, and N. F. Rajani, \"Gedi: Generative discriminator guided sequence generation,\" in Findings of the Association for Computational Linguistics: EMNLP 2021, 2021, pp. 4929-4952." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.885, + 0.923, + 0.943 + ], + "angle": 0, + "content": "[680] Q. Liu, Z. Zhou, L. He, Y. Liu, W. Zhang, and S. 
Su, \"Alignment-enhanced decoding: Defending jailbreaks via token-level adaptive refining of probability distributions,\" in Proceedings of the 2024 Conference on" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "60" + }, + { + "type": "ref_text", + "bbox": [ + 0.118, + 0.054, + 0.492, + 0.083 + ], + "angle": 0, + "content": "Empirical Methods in Natural Language Processing, 2024, pp. 2802-2816." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.084, + 0.493, + 0.169 + ], + "angle": 0, + "content": "[681] A. Liu, M. Sap, X. Lu, S. Swayamdipta, C. Bhagavatula, N. A. Smith, and Y. Choi, \"Dexperts: Decoding-time controlled text generation with experts and anti-experts,\" in Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics, 2021, pp. 6691-6706." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.17, + 0.492, + 0.227 + ], + "angle": 0, + "content": "[682] T. Radcliffe, E. Lockhart, and J. Wetherington, \"Automated prompt engineering for semantic vulnerabilities in large language models,\" Authorea Preprints, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.228, + 0.492, + 0.287 + ], + "angle": 0, + "content": "[683] F. Trad and A. Chehab, \"Prompt engineering or finetuning? a case study on phishing detection with large language models,\" Machine Learning and Knowledge Extraction, vol. 6, no. 1, pp. 367-384, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.288, + 0.492, + 0.359 + ], + "angle": 0, + "content": "[684] A. Zhou, B. Li, and H. 
Wang, \"Robust prompt optimization for defending language models against jailbreaking attacks,\" in Advances in Neural Information Processing Systems, vol. 37. Curran Associates, Inc., 2024, pp. 40184-40211." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.36, + 0.492, + 0.418 + ], + "angle": 0, + "content": "[685] Y. Mo, Y. Wang, Z. Wei, and Y. Wang, \"Fight back against jailbreaking via prompt adversarial tuning,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.418, + 0.492, + 0.476 + ], + "angle": 0, + "content": "[686] Y. Zhang, L. Ding, L. Zhang, and D. Tao, \"Intention analysis makes lms a good jailbreak defender,\" in Proceedings of the 31st International Conference on Computational Linguistics, 2025, pp. 2947-2968." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.477, + 0.492, + 0.534 + ], + "angle": 0, + "content": "[687] Y. Chen, H. Li, Z. Zheng, Y. Song, D. Wu, and B. Hooi, \"Defense against prompt injection attack by leveraging attack techniques,\" arXiv preprint arXiv:2411.00459, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.535, + 0.492, + 0.607 + ], + "angle": 0, + "content": "[688] Z. Zhang, J. Yang, P. Ke, F. Mi, H. Wang, and M. Huang, \"Defending large language models against jailbreaking attacks through goal prioritization,\" in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics, 2023, pp. 8865-8887." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.608, + 0.492, + 0.666 + ], + "angle": 0, + "content": "[689] Y. Xie, J. Yi, J. Shao, J. Curl, L. Lyu, Q. Chen, X. Xie, and F. Wu, \"Defending chatgpt against jailbreak attack via self-reminders,\" Nature Machine Intelligence, vol. 5, no. 12, pp. 1486–1496, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.666, + 0.492, + 0.709 + ], + "angle": 0, + "content": "[690] S. Chen, J. Piet, C. Sitawarin, and D. 
Wagner, \"Struq: Defending against prompt injection with structured queries,\" arXiv preprint arXiv:2402.06363, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.71, + 0.492, + 0.767 + ], + "angle": 0, + "content": "[691] K. Hines, G. Lopez, M. Hall, F. Zarfati, Y. Zunger, and E. Kiciman, \"Defending against indirect prompt injection attacks with spotlighting,\" arXiv preprint arXiv:2403.14720, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.768, + 0.492, + 0.825 + ], + "angle": 0, + "content": "[692] S. Slocum and D. Hadfield-Menell, \"Inverse prompt engineering for task-specific LLM safety,\" 2025. [Online]. Available: https://openreview.net/forum? id=3MDmM0rMPQ" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.826, + 0.492, + 0.869 + ], + "angle": 0, + "content": "[693] K. Edemacu and X. Wu, \"Privacy preserving prompt engineering: A survey,\" arXiv preprint arXiv:2404.06001, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.87, + 0.492, + 0.928 + ], + "angle": 0, + "content": "[694] S. Utpala, S. Hooker, and P.-Y. Chen, \"Locally differentially private document generation using zero shot prompting,\" in Findings of the Association for Computational Linguistics: EMNLP 2023, 2023, pp. 8442-8457." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.929, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[695] H. Duan, A. Dziedzic, N. Papernot, and F. Boenisch," + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.493, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.923, + 0.111 + ], + "angle": 0, + "content": "\"Flocks of stochastic parrots: Differentially private prompt learning for large language models,\" Advances in Neural Information Processing Systems, vol. 36, pp. 76852-76871, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.112, + 0.923, + 0.199 + ], + "angle": 0, + "content": "[696] W. Wang, W. Jiao, J. Huang, R. 
Dai, J.-T. Huang, Z. Tu, and M. R. Lyu, \"Not all countries celebrate thanksgiving: On the cultural dominance in large language models,\" ArXiv, vol. abs/2310.12481, 2023. [Online]. Available: https://api_semanticscholar.org/ CorpusID:264305810" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.2, + 0.923, + 0.257 + ], + "angle": 0, + "content": "[697] M. Kaneko, D. Bollegala, N. Okazaki, and T. Baldwin, \"Evaluating gender bias in large language models via chain-of-thought prompting,\" arXiv preprint arXiv:2401.15585, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.258, + 0.923, + 0.331 + ], + "angle": 0, + "content": "[698] X. He, S. Zannettou, Y. Shen, and Y. Zhang, \"You only prompt once: On the capabilities of prompt learning on large language models to tackle toxic content,\" in 2024 IEEE Symposium on Security and Privacy (SP). IEEE, 2024, pp. 770-787." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.332, + 0.923, + 0.374 + ], + "angle": 0, + "content": "[699] X. Zou, Y. Chen, and K. Li, \"Is the system message really important to jailbreaks in large language models?\" arXiv preprint arXiv:2402.14857, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.375, + 0.923, + 0.432 + ], + "angle": 0, + "content": "[700] R. Xu, Z. Qi, and W. Xu, \"Preemptive answer \"attacks\" on chain-of-thought reasoning,\" in Findings of the Association for Computational Linguistics ACL 2024, 2024, pp. 14708-14726." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.433, + 0.923, + 0.521 + ], + "angle": 0, + "content": "[701] C. Zheng, F. Yin, H. Zhou, F. Meng, J. Zhou, K.-W. Chang, M. Huang, and N. Peng, \"On prompt-driven safeguarding for large language models,\" in Proceedings of the 41st International Conference on Machine Learning, ser. Proceedings of Machine Learning Research, vol. 235, 21-27 Jul 2024, pp. 61-613." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.521, + 0.923, + 0.593 + ], + "angle": 0, + "content": "[702] Y. Wang, X. Liu, Y. Li, M. Chen, and C. Xiao, \"Adashield: Safeguarding multimodal large language models from structure-based attack via adaptive shield prompting,\" in European Conference on Computer Vision. Springer, 2024, pp. 77-94." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.594, + 0.923, + 0.65 + ], + "angle": 0, + "content": "[703] Z. Shi, Z. Wang, Y. Su, W. Luo, H. Gao, F. Yang, R. Tang, and Y. Zhang, \"Robustness-aware automatic prompt optimization,\" arXiv preprint arXiv:2412.18196, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.651, + 0.923, + 0.724 + ], + "angle": 0, + "content": "[704] Y. Wu, Y. Gao, B. Zhu, Z. Zhou, X. Sun, S. Yang, J.-G. Lou, Z. Ding, and L. Yang, \"Strago: Harnessing strategic guidance for prompt optimization,\" in Findings of the Association for Computational Linguistics: EMNLP 2024, 2024, pp. 10043-10061." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.725, + 0.923, + 0.781 + ], + "angle": 0, + "content": "[705] F. Wu, N. Zhang, S. Jha, P. McDaniel, and C. Xiao, \"A new era in llm security: Exploring security concerns in real-world llm-based systems,\" arXiv preprint arXiv:2402.18649, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.782, + 0.923, + 0.87 + ], + "angle": 0, + "content": "[706] A. Borzunov, M. Ryabinin, A. Chumachenko, D. Baranchuk, T. Dettmers, Y. Belkada, P. Samygin, and C. A. Raffel, \"Distributed inference and finetuning of large language models over the internet,\" Advances in neural information processing systems, vol. 36, pp. 12312-12331, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.87, + 0.923, + 0.943 + ], + "angle": 0, + "content": "[707] A. Agrawal, N. Kedia, A. Panwar, J. Mohan, N. Kwa-tra, B. Gulavani, A. Tumanov, and R. 
Ramjee, \"Taming {Throughput-Latency} tradeoff in {LLM} inference with {Sarathi-Serve}\", in 18th USENIX Symposium on Operating Systems Design and Implementation (OSDI" + }, + { + "type": "list", + "bbox": [ + 0.509, + 0.054, + 0.923, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.921, + 0.043 + ], + "angle": 0, + "content": "61" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.274, + 0.068 + ], + "angle": 0, + "content": "24), 2024, pp. 117-134." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.069, + 0.492, + 0.156 + ], + "angle": 0, + "content": "[708] Y. Zhong, S. Liu, J. Chen, J. Hu, Y. Zhu, X. Liu, X. Jin, and H. Zhang, \" \\(\\{\\mathrm{DistServe}\\}\\): Disaggregating prefill and decoding for goodput-optimized large language model serving,\" in 18th USENIX Symposium on Operating Systems Design and Implementation (OSDI 24), 2024, pp. 193-210." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.157, + 0.492, + 0.214 + ], + "angle": 0, + "content": "[709] H. Sun, Z. Chen, X. Yang, Y. Tian, and B. Chen, \"Tri force: Lossless acceleration of long sequence generation with hierarchical speculative decoding,\" in First Conference on Language Modeling, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.215, + 0.492, + 0.287 + ], + "angle": 0, + "content": "[710] T. Cai, Y. Li, Z. Geng, H. Peng, J. D. Lee, D. Chen, and T. Dao, \"Medusa: Simple LLM inference acceleration framework with multiple decoding heads,\" in Proceedings of the 41st International Conference on Machine Learning, vol. 235. PMLR, 2024, pp. 5209-5235." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.288, + 0.492, + 0.359 + ], + "angle": 0, + "content": "[711] J. Chen, V. Tiwari, R. Sadhukhan, Z. Chen, J. Shi, I. 
E.-H. Yen, and B. Chen, \"Magicdec: Breaking the latency-throughput tradeoff for long context generation with speculative decoding,\" arXiv preprint arXiv:2408.11049, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.36, + 0.492, + 0.446 + ], + "angle": 0, + "content": "[712] C. Holmes, M. Tanaka, M. Wyatt, A. A. Awan, J. Rasley, S. Rajbhandari, R. Y. Aminabadi, H. Qin, A. Bakhtiari, L. Kurilenko et al., \"Deepspeed-fastgen: High-throughput text generation for llms via mii and deepspeed-inference,\" arXiv preprint arXiv:2401.08671, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.447, + 0.492, + 0.521 + ], + "angle": 0, + "content": "[713] R. Svirschevski, A. May, Z. Chen, B. Chen, Z. Jia, and M. Ryabinin, \"Specexec: Massively parallel speculative decoding for interactive lmm inference on consumer devices,\" Advances in Neural Information Processing Systems, vol. 37, pp. 16342-16368, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.521, + 0.492, + 0.607 + ], + "angle": 0, + "content": "[714] P. Wang, D. Zhang, L. Li, C. Tan, X. Wang, M. Zhang, K. Ren, B. Jiang, and X. Qiu, \"Inferaligner: Inference-time alignment for harmlessness through cross-model guidance,\" in Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, 2024, pp. 10460-10479." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.608, + 0.492, + 0.665 + ], + "angle": 0, + "content": "[715] X. Wang, D. Wu, Z. Ji, Z. Li, P. Ma, S. Wang, Y. Li, Y. Liu, N. Liu, and J. Rahmel, \"Selfdefend: Llms can defend themselves against jailbreaking in a practical manner,\" CoRR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.665, + 0.492, + 0.723 + ], + "angle": 0, + "content": "[716] X. Hu, P.-Y. Chen, and T.-Y. Ho, \"Gradient cuff: Detecting jailbreak attacks on large language models by exploring refusal loss landscapes,\" arXiv preprint arXiv:2403.00867, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.724, + 0.492, + 0.768 + ], + "angle": 0, + "content": "[717] R. K. Sharma, V. Gupta, and D. Grossman, \"Spml: A dsl for defending language models against prompt attacks,\" arXiv preprint arXiv:2402.11755, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.768, + 0.492, + 0.855 + ], + "angle": 0, + "content": "[718] J. Zhao, S. Wang, Y. Zhao, X. Hou, K. Wang, P. Gao, Y. Zhang, C. Wei, and H. Wang, \"Models are codes: Towards measuring malicious code poisoning attacks on pre-trained model hubs,\" in Proceedings of the 39th IEEE/ACM International Conference on Automated Software Engineering, 2024, pp. 2087-2098." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.856, + 0.492, + 0.913 + ], + "angle": 0, + "content": "[719] S. Ghosh, P. Varshney, E. Galinkin, and C. Parisien, \"Aegis: Online adaptive ai content safety moderation with ensemble of llm experts,\" arXiv preprint arXiv:2404.05993, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.914, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[720] S. Ghosh, P. Varshney, M. N. Sreedhar, A. Padmakumar, T. Rebedea, J. R. Varghese, and C. Parisien," + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.923, + 0.098 + ], + "angle": 0, + "content": "\"Aegis2.0: A diverse ai safety dataset and risks taxonomy for alignment of llm guardrails,\" in Neurips Safe Generative AI Workshop 2024, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.098, + 0.923, + 0.156 + ], + "angle": 0, + "content": "[721] S. Han, K. Rao, A. Ettinger, L. Jiang, B. Y. Lin, N. Lambert, Y. Choi, and N. Dziri, \"Wildguard: Open one-stop moderation tools for safety risks, jailbreaks, and refusals of llms,\" arXiv preprint arXiv:2406.18495, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.157, + 0.923, + 0.228 + ], + "angle": 0, + "content": "[722] W. Zeng, Y. Liu, R. Mullins, L. Peran, J. Fernandez, H. Harkous, K. Narasimhan, D. Proud, P. Kumar, B. Radharapu et al., \"Shieldgemma: Generative ai content moderation based on gemma,\" arXiv preprint arXiv:2407.21772, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.229, + 0.923, + 0.287 + ], + "angle": 0, + "content": "[723] Y. Liu, H. Gao, S. Zhai, J. Xia, T. Wu, Z. Xue, Y. Chen, K. Kawaguchi, J. Zhang, and B. Hooi, \"Guardreasoner: Towards reasoning-based llm safeguards,\" arXiv preprint arXiv:2501.18492, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.288, + 0.923, + 0.33 + ], + "angle": 0, + "content": "[724] C. Wang, Y. Liu, B. Li, D. Zhang, Z. Li, and J. Fang, \"Safety in large reasoning models: A survey,\" arXiv preprint arXiv:2504.17704, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.331, + 0.923, + 0.389 + ], + "angle": 0, + "content": "[725] H. Jin, A. Zhou, J. Menke, and H. Wang, \"Jailbreaking large language models against moderation guardrails via cipher characters,\" Advances in Neural Information Processing Systems, vol. 37, pp. 59408-59435, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.39, + 0.923, + 0.447 + ], + "angle": 0, + "content": "[726] D. Ran, J. Liu, Y. Gong, J. Zheng, X. He, T. Cong, and A. Wang, \"Jailbreak: An integrated toolkit for evaluating jailbreak attempts against large language models,\" arXiv preprint arXiv:2406.09321, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.447, + 0.923, + 0.505 + ], + "angle": 0, + "content": "[727] H. Qiu, S. Zhang, A. Li, H. He, and Z. Lan, \"Latent jailbreak: A benchmark for evaluating text safety and output robustness of large language models,\" arXiv preprint arXiv:2307.08487, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.505, + 0.923, + 0.593 + ], + "angle": 0, + "content": "[728] K. 
Zhu, J. Wang, J. Zhou, Z. Wang, H. Chen, Y. Wang, L. Yang, W. Ye, Y. Zhang, N. Gong et al., \"Promptrobust: Towards evaluating the robustness of large language models on adversarial prompts,\" in Proceedings of the 1st ACM Workshop on Large AI Systems and Models with Privacy and Safety Analysis, 2023, pp. 57-68." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.594, + 0.923, + 0.665 + ], + "angle": 0, + "content": "[729] A. Pei, Z. Yang, S. Zhu, R. Cheng, and J. Jia, \"Selfprompt: Autonomously evaluating llm robustness via domain-constrained knowledge guidelines and refined adversarial prompts,\" arXiv preprint arXiv:2412.00765, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.665, + 0.923, + 0.723 + ], + "angle": 0, + "content": "[730] Z. Xu, Y. Liu, G. Deng, Y. Li, and S. Picek, \"A comprehensive study of jailbreak attack versus defense for large language models,\" arXiv preprint arXiv:2402.13457, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.724, + 0.923, + 0.781 + ], + "angle": 0, + "content": "[731] K. Chen, Y. Liu, D. Wang, J. Chen, and W. Wang, \"Characterizing and evaluating the reliability of llms against jailbreak attacks,\" arXiv preprint arXiv:2408.09326, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.782, + 0.923, + 0.854 + ], + "angle": 0, + "content": "[732] B. Wang, C. Xu, S. Wang, Z. Gan, Y. Cheng, J. Gao, A. H. Awadallah, and B. Li, \"Adversarial glue: A multi-task benchmark for robustness evaluation of language models,\" arXiv preprint arXiv:2111.02840, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.855, + 0.923, + 0.943 + ], + "angle": 0, + "content": "[733] G. Dong, J. Zhao, T. Hui, D. Guo, W. Wang, B. Feng, Y. Qiu, Z. Gongque, K. He, Z. Wang et al., \"Revisit input perturbation problems for llms: A unified robustness evaluation framework for noisy slot filling task,\" in CCF International Conference on Natural Language Processing and Chinese Computing. 
Springer," + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "62" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.245, + 0.069 + ], + "angle": 0, + "content": "2023, pp. 682-694." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.07, + 0.492, + 0.112 + ], + "angle": 0, + "content": "[734] J. Zheng, A. Ritter, and W. Xu, \"Neo-bench: Evaluating robustness of large language models with neologisms,\" arXiv preprint arXiv:2402.12261, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.113, + 0.492, + 0.169 + ], + "angle": 0, + "content": "[735] Y. Li, Y. Guo, F. Guerin, and C. Lin, \"Evaluating large language models for generalization and robustness via data compression,\" arXiv preprint arXiv:2402.00861, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.17, + 0.492, + 0.242 + ], + "angle": 0, + "content": "[736] Q. Zhang, H. Qiu, D. Wang, Y. Li, T. Zhang, W. Zhu, H. Weng, L. Yan, and C. Zhang, “A benchmark for semantic sensitive information in llms outputs,” in The Thirteenth International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.243, + 0.492, + 0.302 + ], + "angle": 0, + "content": "[737] A. Wang, A. Singh, J. Michael, F. Hill, O. Levy, and S. R. Bowman, \"Glue: A multi-task benchmark and analysis platform for natural language understanding,\" arXiv preprint arXiv:1804.07461, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.302, + 0.492, + 0.36 + ], + "angle": 0, + "content": "[738] J. Li, X. Cheng, W. X. Zhao, J.-Y. Nie, and J.-R. 
Wen, \"Halueval: A large-scale hallucination evaluation benchmark for large language models,\" arXiv preprint arXiv:2305.11747, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.36, + 0.492, + 0.404 + ], + "angle": 0, + "content": "[739] A. Pal, L. K. Umapathi, and M. Sankarasubbu, \"Med-halt: Medical domain hallucination test for large language models,\" arXiv preprint arXiv:2307.15343, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.404, + 0.492, + 0.462 + ], + "angle": 0, + "content": "[740] Z. Ji, Y. Gu, W. Zhang, C. Lyu, D. Lin, and K. Chen, \"Anah: Analytical annotation of hallucinations in large language models,\" arXiv preprint arXiv:2405.20315, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.462, + 0.492, + 0.52 + ], + "angle": 0, + "content": "[741] P. Manakul, A. Liusie, and M. J. Gales, \"Selfcheck-gpt: Zero-resource black-box hallucination detection for generative large language models,\" arXiv preprint arXiv:2303.08896, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.521, + 0.492, + 0.579 + ], + "angle": 0, + "content": "[742] Y.-S. Chuang, Y. Xie, H. Luo, Y. Kim, J. Glass, and P. He, \"Dola: Decoding by contrasting layers improves factuality in large language models,\" arXiv preprint arXiv:2309.03883, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.579, + 0.492, + 0.637 + ], + "angle": 0, + "content": "[743] N. Mündler, J. He, S. Jenko, and M. Vechev, \"Self-contradictory hallucinations of large language models: Evaluation, detection and mitigation,\" arXiv preprint arXiv:2305.15852, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.637, + 0.492, + 0.707 + ], + "angle": 0, + "content": "[744] M. Elaraby, M. Lu, J. Dunn, X. Zhang, Y. Wang, S. Liu, P. Tian, Y. Wang, and Y. Wang, \"Halo: Estimation and reduction of hallucinations in open-source weak large language models,\" arXiv preprint arXiv:2308.11764, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.709, + 0.492, + 0.767 + ], + "angle": 0, + "content": "[745] Z. Ji, D. Chen, E. Ishii, S. Cahyawijaya, Y. Bang, B. Wilie, and P. Fung, \"Llm internal states reveal hallucination risk faced with a query,\" arXiv preprint arXiv:2407.03282, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.768, + 0.492, + 0.826 + ], + "angle": 0, + "content": "[746] J. Wei, Y. Yao, J.-F. Ton, H. Guo, A. Estornell, and Y. Liu, \"Measuring and reducing llm hallucination without gold-standard answers,\" arXiv preprint arXiv:2402.10412, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.826, + 0.492, + 0.884 + ], + "angle": 0, + "content": "[747] A. Deshpande, V. Murahari, T. Rajpurohit, A. Kalyan, and K. Narasimhan, \"Toxicity in chatgpt: Analyzing persona-assigned language models,\" arXiv preprint arXiv:2304.05335, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.885, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[748] A. de Wynter, I. Watts, T. Wongsangaroonsri, M. Zhang, N. Farra, N. E. Altintoprak, L. Baur, S. Claudet, P. Gajdusek, C. Gören et al., \"Rtp-lx: Can llms evaluate toxicity in multilingual scenarios?\"" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.805, + 0.069 + ], + "angle": 0, + "content": "arXiv preprint arXiv:2404.14397, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.069, + 0.923, + 0.141 + ], + "angle": 0, + "content": "[749] D. Esiobu, X. Tan, S. Hosseini, M. Ung, Y. Zhang, J. Fernandes, J. Dwivedi-Yu, E. Presani, A. Williams, and E. M. Smith, \"Robbie: Robust bias evaluation of large generative language models,\" arXiv preprint arXiv:2311.18140, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.142, + 0.923, + 0.199 + ], + "angle": 0, + "content": "[750] S. Wang, P. Wang, T. Zhou, Y. Dong, Z. Tan, and J. 
Li, \"Ceb: Compositional evaluation benchmark for fairness in large language models,\" arXiv preprint arXiv:2407.02408, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.2, + 0.923, + 0.258 + ], + "angle": 0, + "content": "[751] H. Li, D. Guo, D. Li, W. Fan, Q. Hu, X. Liu, C. Chan, D. Yao, Y. Yao, and Y. Song, \"Privlm-bench: A multi-level privacy evaluation benchmark for language models,\" arXiv preprint arXiv:2311.04044, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.259, + 0.923, + 0.316 + ], + "angle": 0, + "content": "[752] Q. Li, J. Hong, C. Xie, J. Tan, R. Xin, J. Hou, X. Yin, Z. Wang, D. Hendrycks, Z. Wang et al., \"Llm-pbe: Assessing data privacy in large language models,\" arXiv preprint arXiv:2408.12787, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.317, + 0.923, + 0.389 + ], + "angle": 0, + "content": "[753] D. Zhu, D. Chen, X. Wu, J. Geng, Z. Li, J. Grossklags, and L. Ma, \"Privauditor: Benchmarking data protection vulnerabilities in llm adaptation techniques,\" Advances in Neural Information Processing Systems, vol. 37, pp. 9668-9689, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.39, + 0.923, + 0.448 + ], + "angle": 0, + "content": "[754] L. Rossi, B. Marek, V. Hanke, X. Wang, M. Backes, A. Dziedzic, and F. Boenisch, \"Auditing empirical privacy protection of private llm adaptations,\" in Neurips Safe Generative AI Workshop 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.448, + 0.925, + 0.52 + ], + "angle": 0, + "content": "[755] T. Singh, H. Aditya, V. K. Madisetti, and A. Bahga, \"Whispered tuning: Data privacy preservation in finetuning llms through differential privacy,\" Journal of Software Engineering and Applications, vol. 17, no. 1, pp. 1-22, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.521, + 0.923, + 0.579 + ], + "angle": 0, + "content": "[756] H. Li, W. Hu, H. Jing, Y. Chen, Q. Hu, S. Han, T. Chu, P. Hu, and Y. 
Song, \"Privaci-bench: Evaluating privacy with contextual integrity and legal compliance,\" arXiv preprint arXiv:2502.17041, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.579, + 0.923, + 0.623 + ], + "angle": 0, + "content": "[757] O. Cartwright, H. Dunbar, and T. Radcliffe, “Evaluating privacy compliance in commercial large language models-chatgpt, claude, and gemini,” 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.623, + 0.923, + 0.695 + ], + "angle": 0, + "content": "[758] X. Zhou, M. Weyssow, R. Widyasari, T. Zhang, J. He, Y. Lyu, J. Chang, B. Zhang, D. Huang, and D. Lo, \"Lessleak-bench: A first investigation of data leakage in llms across 83 software engineering benchmarks,\" arXiv preprint arXiv:2502.06215, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.695, + 0.923, + 0.768 + ], + "angle": 0, + "content": "[759] Y. Song, R. Liu, S. Chen, Q. Ren, Y. Zhang, and Y. Yu, \"Securesql: Evaluating data leakage of large language models as natural language interfaces to databases,\" in Findings of the Association for Computational Linguistics: EMNLP 2024, 2024, pp. 5975-5990." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.768, + 0.923, + 0.84 + ], + "angle": 0, + "content": "[760] X. Liu, Y. Zhu, J. Gu, Y. Lan, C. Yang, and Y. Qiao, \"Mm-safetybench: A benchmark for safety evaluation of multimodal large language models,\" in European Conference on Computer Vision. Springer, 2024, pp. 386-403." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.841, + 0.923, + 0.913 + ], + "angle": 0, + "content": "[761] W. Luo, S. Ma, X. Liu, X. Guo, and C. Xiao, \"Jailbreakv-28k: A benchmark for assessing the robustness of multimodal large language models against jailbreak attacks,\" arXiv e-prints, pp. arXiv-2404, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.914, + 0.923, + 0.943 + ], + "angle": 0, + "content": "[762] F. Weng, Y. Xu, C. Fu, and W. 
Wang, \"A comprehensive study on jailbreak attacks and defenses for" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.925, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "63" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.49, + 0.083 + ], + "angle": 0, + "content": "multimodal large language models,\" arXiv preprint arXiv:2408.08464, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.084, + 0.492, + 0.127 + ], + "angle": 0, + "content": "[763] Z. Li, P.-Y. Chen, and T.-Y. Ho, \"Retention score: Quantifying jailbreak risks for vision language models,\" arXiv preprint arXiv:2412.17544, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.127, + 0.492, + 0.228 + ], + "angle": 0, + "content": "[764] T. Guan, F. Liu, X. Wu, R. Xian, Z. Li, X. Liu, X. Wang, L. Chen, F. Huang, Y. Yacoob et al., \"Hallusionbench: an advanced diagnostic suite for entangled language hallucination and visual illusion in large vision-language models,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 14375-14385." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.228, + 0.492, + 0.286 + ], + "angle": 0, + "content": "[765] Y. Li, Y. Du, K. Zhou, J. Wang, W. X. Zhao, and J.-R. Wen, \"Evaluating object hallucination in large vision-language models,\" arXiv preprint arXiv:2305.10355, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.287, + 0.492, + 0.346 + ], + "angle": 0, + "content": "[766] C. Cui, Y. Zhou, X. Yang, S. Wu, L. Zhang, J. Zou, and H. Yao, “Holistic analysis of hallucination in gpt-4v (ision): Bias and interference challenges,” arXiv preprint arXiv:2311.03287, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.346, + 0.492, + 0.389 + ], + "angle": 0, + "content": "[767] S. Wang, X. Ye, Q. Cheng, J. Duan, S. Li, J. Fu, X. Qiu, and X. Huang, \"Cross-modality safety alignment,\" arXiv preprint arXiv:2406.15279, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.389, + 0.492, + 0.461 + ], + "angle": 0, + "content": "[768] A. Agarwal, S. Panda, A. Charles, B. Kumar, H. Patel, P. Pattnayak, T. H. Rafi, T. Kumar, and D.-K. Chae, \"Mvtamperbench: Evaluating robustness of vision-language models,\" arXiv preprint arXiv:2412.19794, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.461, + 0.492, + 0.535 + ], + "angle": 0, + "content": "[769] H. Zhang, W. Shao, H. Liu, Y. Ma, P. Luo, Y. Qiao, and K. Zhang, \"Avibench: Towards evaluating the robustness of large vision-language model on adversarial visual-instructions,\" arXiv e-prints, pp. arXiv-2403, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.535, + 0.492, + 0.579 + ], + "angle": 0, + "content": "[770] Z. Hu, Y. Ren, J. Li, and Y. Yin, \"Viva: A benchmark for vision-grounded decision-making with human values,\" arXiv preprint arXiv:2407.03000, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.579, + 0.492, + 0.651 + ], + "angle": 0, + "content": "[771] Y. Xiao, A. Liu, Q. Cheng, Z. Yin, S. Liang, J. Li, J. Shao, X. Liu, and D. Tao, \"Genderbias- \\(\\cdot\\) emph {VL}: Benchmarking gender bias in vision language models via counterfactual probing,\" arXiv preprint arXiv:2407.00600, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.651, + 0.492, + 0.724 + ], + "angle": 0, + "content": "[772] L. Gustafson, C. Rolland, N. Ravi, Q. Duval, A. Adcock, C.-Y. Fu, M. Hall, and C. Ross, \"Facet: Fairness in computer vision evaluation benchmark,\" in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023, pp. 20370-20382." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.724, + 0.492, + 0.797 + ], + "angle": 0, + "content": "[773] E. Slyman, S. Lee, S. Cohen, and K. Kafle, \"Fairdedup: Detecting and mitigating vision-language fairness disparities in semantic dataset dedduplication,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 13905-13916." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.797, + 0.492, + 0.87 + ], + "angle": 0, + "content": "[774] Y. Zhang, J. Wang, and J. Sang, \"Counterfactually measuring and eliminating social bias in vision-language pre-training models,\" in Proceedings of the 30th ACM International Conference on Multimedia, 2022, pp. 4996-5004." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.87, + 0.492, + 0.928 + ], + "angle": 0, + "content": "[775] K. C. Fraser and S. Kiritchenko, \"Examining gender and racial bias in large vision-language models using a novel dataset of parallel images,\" arXiv preprint arXiv:2402.05779, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.928, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[776] A. Seth, M. Hemani, and C. Agarwal, \"Dear: Debias" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.923, + 0.099 + ], + "angle": 0, + "content": "ing vision-language models with additive residuals,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 6820-6829." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.099, + 0.923, + 0.156 + ], + "angle": 0, + "content": "[777] S. Janghorbani and G. De Melo, \"Multimodal bias: Introducing a framework for stereotypical bias assessment beyond gender and race in vision language models,\" arXiv preprint arXiv:2303.12734, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.157, + 0.923, + 0.228 + ], + "angle": 0, + "content": "[778] Y. Zhang, Y. Huang, Y. Sun, C. Liu, Z. Zhao, Z. Fang, Y. Wang, H. Chen, X. Yang, X. Wei et al., \"Benchmarking trustworthiness of multimodal large language models: A comprehensive study,\" arXiv preprint arXiv:2406.07057, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.228, + 0.923, + 0.3 + ], + "angle": 0, + "content": "[779] Y. Zhang, L. Chen, G. Zheng, Y. Gao, R. Zheng, J. Fu, Z. Yin, S. Jin, Y. Qiao, X. Huang et al., \"Spa-vl: A comprehensive safety preference alignment dataset for vision language model,\" arXiv preprint arXiv:2406.12030, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.301, + 0.923, + 0.373 + ], + "angle": 0, + "content": "[780] Z. Zhang, T. Kou, S. Wang, C. Li, W. Sun, W. Wang, X. Li, Z. Wang, X. Cao, X. Min et al., \"Q-eval-100k: Evaluating visual quality and alignment level for text-to-vision content,\" arXiv preprint arXiv:2503.02357, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.373, + 0.923, + 0.463 + ], + "angle": 0, + "content": "[781] W. Wang, X. Liu, K. Gao, J.-T. Huang, Y. Yuan, P. He, S. Wang, and Z. Tu, \"Can't see the forest for the trees: Benchmarking multimodal safety awareness for multimodal llms,\" ArXiv, vol. abs/2502.11184, 2025. [Online]. Available: https://api.sementicscholar.org/CorpusID:276409442" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.463, + 0.923, + 0.55 + ], + "angle": 0, + "content": "[782] W. Wang, K. Gao, Z. Jia, Y. Yuan, J.-T. Huang, Q. Liu, S. Wang, W. Jiao, and Z. Tu, \"Chain-of-jailbreak attack for image generation models via editing step by step,\" ArXiv, vol. abs/2410.03869, 2024. [Online]. Available: https://api_semanticscholar.org/ CorpusID:273186566" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.55, + 0.923, + 0.607 + ], + "angle": 0, + "content": "[783] H. Naveed, A. U. Khan, S. Qiu, M. Saqib, S. Anwar, M. Usman, N. 
Akhtar, N. Barnes, and A. Mian, \"A comprehensive overview of large language models,\" arXiv preprint arXiv:2307.06435, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.608, + 0.923, + 0.68 + ], + "angle": 0, + "content": "[784] W. Zhao, Y. Hu, Y. Deng, J. Guo, X. Sui, X. Han, A. Zhang, Y. Zhao, B. Qin, T.-S. Chua et al., \"Beware of your po! measuring and mitigating ai safety risks in role-play fine-tuning of llms,\" arXiv preprint arXiv:2502.20968, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.68, + 0.923, + 0.754 + ], + "angle": 0, + "content": "[785] B. Liu, X. Li, J. Zhang, J. Wang, T. He, S. Hong, H. Liu, S. Zhang, K. Song, K. Zhu et al., \"Advances and challenges in foundation agents: From brain-inspired intelligence to evolutionary, collaborative, and safe systems,\" arXiv preprint arXiv:2504.01990, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.754, + 0.923, + 0.813 + ], + "angle": 0, + "content": "[786] H. Jin, L. Huang, H. Cai, J. Yan, B. Li, and H. Chen, \"From llms to llm-based agents for software engineering: A survey of current, challenges and future,\" arXiv preprint arXiv:2408.02479, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.813, + 0.923, + 0.885 + ], + "angle": 0, + "content": "[787] J. Piao, Y. Yan, J. Zhang, N. Li, J. Yan, X. Lan, Z. Lu, Z. Zheng, J. Y. Wang, D. Zhou et al., \"Agentsociety: Large-scale simulation of llm-driven generative agents advances understanding of human behaviors and society,\" arXiv preprint arXiv:2502.08691, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.885, + 0.923, + 0.944 + ], + "angle": 0, + "content": "[788] Y. Yan, S. Wang, J. Huo, P. S. Yu, X. Hu, and Q. Wen, \"Mathagent: Leveraging a mixture-of-math-agent framework for real-world multimodal mathematical error detection,\" arXiv preprint arXiv:2503.18132, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "64" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.126 + ], + "angle": 0, + "content": "[789] H. Wang, A. Zhang, N. Duy Tai, J. Sun, T.-S. Chua et al., \"Ali-agent: Assessing llms' alignment with human values via agent-based evaluation,\" Advances in Neural Information Processing Systems, vol. 37, pp. 99040-99088, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.127, + 0.492, + 0.185 + ], + "angle": 0, + "content": "[790] K. Zhang, J. Li, G. Li, X. Shi, and Z. Jin, \"Codeagent: Enhancing code generation with tool-integrated agent systems for real-world repo-level coding challenges,\" arXiv preprint arXiv:2401.07339, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.186, + 0.492, + 0.243 + ], + "angle": 0, + "content": "[791] Y. Shen, K. Song, X. Tan, D. Li, W. Lu, and Y. Zhuang, \"Hugginggpt: Solving ai tasks with chatgpt and its friends in hugging face,\" Advances in Neural Information Processing Systems, vol. 36, pp. 38154-38180, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.244, + 0.492, + 0.301 + ], + "angle": 0, + "content": "[792] Z. Chu, S. Wang, J. Xie, T. Zhu, Y. Yan, J. Ye, A. Zhong, X. Hu, J. Liang, P. S. Yu et al., \"Llm agents for education: Advances and applications,\" arXiv preprint arXiv:2503.11733, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.302, + 0.492, + 0.358 + ], + "angle": 0, + "content": "[793] W. Zhang, Y. Shen, W. Lu, and Y. Zhuang, \"Data-copilot: Bridging billions of data and humans with autonomous workflow,\" arXiv preprint arXiv:2306.07209, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.36, + 0.492, + 0.404 + ], + "angle": 0, + "content": "[794] W. Xu, Z. Liang, K. Mei, H. Gao, J. Tan, and Y. Zhang, \"A-mem: Agentic memory for llm agents,\" arXiv preprint arXiv:2502.12110, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.405, + 0.492, + 0.447 + ], + "angle": 0, + "content": "[795] Y. Shang, Y. Li, K. Zhao, L. Ma, J. Liu, F. Xu, and Y. Li, \"Agentsquare: Automatic llm agent search in modular design space,\" arXiv preprint arXiv:2410.06153, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.448, + 0.492, + 0.521 + ], + "angle": 0, + "content": "[796] J. Yang, C. Jimenez, A. Wettig, K. Lieret, S. Yao, K. Narasimhan, and O. Press, \"Swe-agent: Agent-computer interfaces enable automated software engineering,\" Advances in Neural Information Processing Systems, vol. 37, pp. 50528-50652, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.522, + 0.492, + 0.579 + ], + "angle": 0, + "content": "[797] S. Agashe, J. Han, S. Gan, J. Yang, A. Li, and X. E. Wang, \"Agent s: An open agentic framework that uses computers like a human,\" arXiv preprint arXiv:2410.08164, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.58, + 0.492, + 0.636 + ], + "angle": 0, + "content": "[798] S. Hao, Y. Gu, H. Ma, J. J. Hong, Z. Wang, D. Z. Wang, and Z. Hu, \"Reasoning with language model is planning with world model,\" arXiv preprint arXiv:2305.14992, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.637, + 0.492, + 0.694 + ], + "angle": 0, + "content": "[799] J. Hong, J. Lin, A. Dragan, and S. Levine, \"Interactive dialogue agents via reinforcement learning on hindsight regenerations,\" arXiv preprint arXiv:2411.05194, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.695, + 0.492, + 0.739 + ], + "angle": 0, + "content": "[800] J. Tang, T. Fan, and C. 
Huang, \"Autoagent: A fully-automated and zero-code framework for llm agents,\" arXiv e-prints, pp. arXiv-2502, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.74, + 0.492, + 0.812 + ], + "angle": 0, + "content": "[801] G. Li, H. Hammoud, H. Itani, D. Khizbullin, and B. Ghanem, \"Camel: Communicative agents for\" mind\" exploration of large language model society,\" Advances in Neural Information Processing Systems, vol. 36, pp. 51991-52008, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.813, + 0.492, + 0.869 + ], + "angle": 0, + "content": "[802] S. Yuan, K. Song, J. Chen, X. Tan, D. Li, and D. Yang, \"Evoagent: Towards automatic multi-agent generation via evolutionary algorithms,\" arXiv preprint arXiv:2406.14228, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.87, + 0.492, + 0.927 + ], + "angle": 0, + "content": "[803] M. Zhuge, W. Wang, L. Kirsch, F. Faccio, D. Khizbullin, and J. Schmidhuber, \"Language agents as estimizable graphs,\" arXiv preprint arXiv:2402.16823, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.928, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[804] Y. Wang, T. Shen, L. Liu, and J. Xie, \"Sibyl: Simple" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.922, + 0.083 + ], + "angle": 0, + "content": "yet effective agent framework for complex real-world reasoning,\" arXiv preprint arXiv:2407.10718, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.084, + 0.922, + 0.142 + ], + "angle": 0, + "content": "[805] Z. Wang, X. Zeng, W. Liu, L. Li, Y. Wang, L. Shang, X. Jiang, Q. Liu, and K.-F. Wong, \"Toolflow: Boosting llm tool-calling through natural and coherent dialogue synthesis,\" arXiv preprint arXiv:2410.18447, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.143, + 0.922, + 0.184 + ], + "angle": 0, + "content": "[806] F. Wu, S. 
Wu, Y. Cao, and C. Xiao, \"Wipi: A new web threat for llm-driven web agents,\" arXiv preprint arXiv:2402.16965, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.186, + 0.922, + 0.258 + ], + "angle": 0, + "content": "[807] S. S. Kannan, V. L. Venkatesh, and B.-C. Min, \"Smartllm: Smart multi-agent robot task planning using large language models,\" in 2024 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2024, pp. 12140-12147." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.259, + 0.922, + 0.315 + ], + "angle": 0, + "content": "[808] R. Fang, R. Bindu, A. Gupta, and D. Kang, \"Llm agents can autonomously exploit one-day vulnerabilities,\" arXiv preprint arXiv:2404.08144, vol. 13, p. 14, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.317, + 0.922, + 0.36 + ], + "angle": 0, + "content": "[809] R. Fang, R. Bindu, A. Gupta, Q. Zhan, and D. Kang, \"Llm agents can autonomously hack websites,\" arXiv preprint arXiv:2402.06664, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.361, + 0.922, + 0.404 + ], + "angle": 0, + "content": "[810] W. Cheng, K. Sun, X. Zhang, and W. Wang, \"Security attacks on llm-based code completion tools,\" arXiv preprint arXiv:2408.11006, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.405, + 0.922, + 0.462 + ], + "angle": 0, + "content": "[811] X. Fu, Z. Wang, S. Li, R. K. Gupta, N. Mireshghallah, T. Berg-Kirkpatrick, and E. Fernandes, \"Misusing tools in large language models with visual adversarial examples,\" arXiv preprint arXiv:2310.03185, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.463, + 0.922, + 0.52 + ], + "angle": 0, + "content": "[812] X. Fu, S. Li, Z. Wang, Y. Liu, R. K. Gupta, T. Berg-Kirkpatrick, and E. Fernandes, \"Imprompter: Tricking llm agents into improper tool use,\" arXiv preprint arXiv:2410.14923, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.521, + 0.922, + 0.579 + ], + "angle": 0, + "content": "[813] B. Zhang, Y. Tan, Y. Shen, A. Salem, M. Backes, S. Zannettou, and Y. Zhang, \"Breaking agents: Compromising autonomous llm agents through malfunction amplification,\" arXiv preprint arXiv:2407.20859, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.58, + 0.922, + 0.637 + ], + "angle": 0, + "content": "[814] H. Wang, R. Zhang, J. Wang, M. Li, Y. Huang, D. Wang, and Q. Wang, \"From allies to adversaries: Manipulating llm tool-calling through adversarial injection,\" arXiv preprint arXiv:2412.10198, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.638, + 0.922, + 0.708 + ], + "angle": 0, + "content": "[815] W. Yang, X. Bi, Y. Lin, S. Chen, J. Zhou, and X. Sun, \"Watch out for your agents! investigating backdoor threats to lvm-based agents,\" Advances in Neural Information Processing Systems, vol. 37, pp. 100938-100964, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.71, + 0.922, + 0.768 + ], + "angle": 0, + "content": "[816] P. Zhu, Z. Zhou, Y. Zhang, S. Yan, K. Wang, and S. Su, \"Demonagent: Dynamically encrypted multi-backdoor implantation attack on llm-based agent,\" arXiv preprint arXiv:2502.12575, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.769, + 0.922, + 0.812 + ], + "angle": 0, + "content": "[817] Y. Wang, D. Xue, S. Zhang, and S. Qian, \"Badagent: Inserting and activating backdoor attacks in llm agents,\" arXiv preprint arXiv:2406.03007, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.813, + 0.922, + 0.883 + ], + "angle": 0, + "content": "[818] Z. Jiang, M. Li, G. Yang, J. Wang, Y. Huang, Z. Chang, and Q. Wang, \"Mimicking the familiar: Dynamic command generation for information theft attacks in llm tool-learning system,\" arXiv preprint arXiv:2502.11358, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.885, + 0.922, + 0.928 + ], + "angle": 0, + "content": "[819] W. Zhao, V. Khazanchi, H. Xing, X. He, Q. Xu, and N. D. Lane, \"Attacks on third-party apis of large language models,\" arXiv preprint arXiv:2404.16891, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.929, + 0.922, + 0.943 + ], + "angle": 0, + "content": "[820] J. Chen and S. L. Cong, \"Agentguard: Repurposing" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.922, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "65" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.49, + 0.083 + ], + "angle": 0, + "content": "agentric orchestrator for safety evaluation of tool orchestration,\" arXiv preprint arXiv:2502.09809, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.084, + 0.492, + 0.155 + ], + "angle": 0, + "content": "[821] X. Zhang, H. Xu, Z. Ba, Z. Wang, Y. Hong, J. Liu, Z. Qin, and K. Ren, \"Privacyasst: Safeguarding user privacy in tool-using large language model agents,\" IEEE Transactions on Dependable and Secure Computing, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.157, + 0.492, + 0.227 + ], + "angle": 0, + "content": "[822] Z. Xiang, L. Zheng, Y. Li, J. Hong, Q. Li, H. Xie, J. Zhang, Z. Xiong, C. Xie, C. Yang et al., \"Guardagent: Safeguard llm agents by a guard agent via knowledge-enabled reasoning,\" arXiv preprint arXiv:2406.09187, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.229, + 0.492, + 0.287 + ], + "angle": 0, + "content": "[823] Y. Gao, Y. Xiong, X. Gao, K. Jia, J. Pan, Y. Bi, Y. Dai, J. Sun, H. Wang, and H. 
Wang, \"Retrieval-augmented generation for large language models: A survey,\" arXiv preprint arXiv:2312.10997, vol. 2, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.288, + 0.492, + 0.346 + ], + "angle": 0, + "content": "[824] P. Zhao, H. Zhang, Q. Yu, Z. Wang, Y. Geng, F. Fu, L. Yang, W. Zhang, J. Jiang, and B. Cui, \"Retrievalaugmented generation for ai-generated content: A survey,\" arXiv preprint arXiv:2402.19473, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.346, + 0.492, + 0.389 + ], + "angle": 0, + "content": "[825] C. Xiang, T. Wu, Z. Zhong, D. Wagner, D. Chen, and P. Mittal, \"Certifiably robust rag against retrieval corruption,\" arXiv preprint arXiv:2405.15556, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.39, + 0.492, + 0.461 + ], + "angle": 0, + "content": "[826] Z. Chen, Z. Xiang, C. Xiao, D. Song, and B. Li, \"Agentpoison: Red-teaming llm agents via poisoning memory or knowledge bases,\" Advances in Neural Information Processing Systems, vol. 37, pp. 130-185-130-213, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.462, + 0.492, + 0.52 + ], + "angle": 0, + "content": "[827] W. Zou, R. Geng, B. Wang, and J. Jia, \"Poisonedrag: Knowledge corruption attacks to retrieval-augmented generation of large language models,\" arXiv preprint arXiv:2402.07867, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.521, + 0.492, + 0.564 + ], + "angle": 0, + "content": "[828] Z. Zhong, Z. Huang, A. Wettig, and D. Chen, \"Poisoning retrieval corpora by injecting adversarial passages,\" arXiv preprint arXiv:2310.19156, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.565, + 0.492, + 0.622 + ], + "angle": 0, + "content": "[829] X. Gu, X. Zheng, T. Pang, C. Du, Q. Liu, Y. Wang, J. Jiang, and M. Lin, \"Agent smith: A single image can jailbreak one million multimodal llm agents exponentially fast,\" arXiv preprint arXiv:2402.08567, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.623, + 0.492, + 0.68 + ], + "angle": 0, + "content": "[830] A. Li, Y. Zhou, V. C. Raghuram, T. Goldstein, and M. Goldblum, \"Commercial llm agents are already vulnerable to simple yet dangerous attacks,\" arXiv preprint arXiv:2502.08586, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.681, + 0.492, + 0.738 + ], + "angle": 0, + "content": "[831] H. Li, M. Xu, and Y. Song, \"Sentence embedding leaks more information than you expect: Generative embedding inversion attack to recover the whole sentence,\" arXiv preprint arXiv:2305.03010, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.739, + 0.492, + 0.796 + ], + "angle": 0, + "content": "[832] M. Russinovich, A. Salem, and R. Eldan, \"Great, now write an article about that: The crescendo multi-turn llm jailbreak attack,\" arXiv preprint arXiv:2404.01833, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.798, + 0.492, + 0.855 + ], + "angle": 0, + "content": "[833] Y. Cheng, M. Georgopoulos, V. Cevher, and G. G. Chrysos, \"Leveraging the context through multiround interactions for jailbreaking attacks,\" arXiv preprint arXiv:2402.09177, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.856, + 0.492, + 0.928 + ], + "angle": 0, + "content": "[834] A. Priyanshu and S. Vijay, \"Fractured-sorry-bench: Framework for revealing attacks in conversational turns undermining refusal efficacy and defenses over sorry-bench (automated multi-shot jailbreaks),\" arXiv preprint arXiv:2408.16163, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.929, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[835] D. Agarwal, A. R. Fabbri, B. Risher, P. Laban," + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.923, + 0.098 + ], + "angle": 0, + "content": "S. Joty, and C.-S. 
Wu, \"Prompt leakage effect and defense strategies for multi-turn llm interactions,\" arXiv preprint arXiv:2404.16251, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.099, + 0.923, + 0.154 + ], + "angle": 0, + "content": "[836] T. Tong, J. Xu, Q. Liu, and M. Chen, \"Securing multi-turn conversational language models from distributed backdoor triggers,\" arXiv preprint arXiv:2407.04151, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.155, + 0.923, + 0.228 + ], + "angle": 0, + "content": "[837] J. Mao, F. Meng, Y. Duan, M. Yu, X. Jia, J. Fang, Y. Liang, K. Wang, and Q. Wen, \"Agentsafe: Safeguarding large language model-based multi-agent systems via hierarchical data management,\" arXiv preprint arXiv:2503.04392, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.229, + 0.923, + 0.273 + ], + "angle": 0, + "content": "[838] H. Zhou, K.-H. Lee, Z. Zhan, Y. Chen, and Z. Li, \"Trustrag: Enhancing robustness and trustworthiness in rag,\" arXiv preprint arXiv:2501.00879, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.273, + 0.923, + 0.346 + ], + "angle": 0, + "content": "[839] X. Xian, G. Wang, X. Bi, J. Srinivasa, A. Kundu, C. Fleming, M. Hong, and J. Ding, \"On the vulnerability of applying retrieval-augmented generation within knowledge-intensive application domains,\" arXiv preprint arXiv:2409.17275, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.346, + 0.923, + 0.419 + ], + "angle": 0, + "content": "[840] B. Chen, G. Wang, H. Guo, Y. Wang, and Q. Yan, \"Understanding multi-turn toxic behaviors in open-domain chatbots,\" in Proceedings of the 26th International Symposium on Research in Attacks, Intrusions and Defenses, 2023, pp. 282-296." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.419, + 0.923, + 0.476 + ], + "angle": 0, + "content": "[841] R. Song, M. O. Ozmen, H. Kim, A. Bianchi, and Z. B. 
Celik, \"Enhancing llm-based autonomous driving agents to mitigate perception attacks,\" arXiv preprint arXiv:2409.14488, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.476, + 0.923, + 0.535 + ], + "angle": 0, + "content": "[842] C. H. Low, Z. Wang, T. Zhang, Z. Zeng, Z. Zhuo, E. B. Mazomenos, and Y. Jin, \"Surgraw: Multi-agent workflow with chain-of-thought reasoning for surgical intelligence,\" arXiv preprint arXiv:2503.10265, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.535, + 0.923, + 0.592 + ], + "angle": 0, + "content": "[843] Z. Wang, J. Wu, C. H. Low, and Y. Jin, \"Medagent-pro: Towards multi-modal evidence-based medical diagnosis via reasoning agentic workflow,\" arXiv preprint arXiv:2503.18968, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.593, + 0.923, + 0.666 + ], + "angle": 0, + "content": "[844] K. N. Jeptoo and C. Sun, \"Enhancing fake news detection with large language models through multi-agent debates,\" in CCF International Conference on Natural Language Processing and Chinese Computing. Springer, 2024, pp. 474-486." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.666, + 0.923, + 0.709 + ], + "angle": 0, + "content": "[845] T. Park, \"Enhancing anomaly detection in financial markets with an llm-based multi-agent framework,\" arXiv preprint arXiv:2403.19735, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.71, + 0.923, + 0.781 + ], + "angle": 0, + "content": "[846] Z. Yang, S. S. Raman, A. Shah, and S. Tellex, \"Plug in the safety chip: Enforcing constraints for llm-driven robot agents,\" in 2024 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2024, pp. 14435-14442." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.782, + 0.923, + 0.855 + ], + "angle": 0, + "content": "[847] J. Zhang, C. Xu, and B. 
Li, \"Chatscene: Knowledge-enabled safety-critical scenario generation for autonomous vehicles,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 15459-15469." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.856, + 0.923, + 0.914 + ], + "angle": 0, + "content": "[848] T. Abuelsaad, D. Akkil, P. Dey, A. Jagmohan, A. Vempaty, and R. Kokku, \"Agent-e: From autonomous web navigation to foundational design principles in agenti-tic systems,\" arXiv preprint arXiv:2407.13032, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.915, + 0.923, + 0.943 + ], + "angle": 0, + "content": "[849] E. Debenedetti, J. Zhang, M. Balunović, L. Beurer-Kellner, M. Fischer, and F. Tramère, \"Agentdojo: A dy" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "66" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.49, + 0.083 + ], + "angle": 0, + "content": "namic environment to evaluate attacks and defenses for llm agents,\" arXiv preprint arXiv:2406.13352, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.084, + 0.492, + 0.156 + ], + "angle": 0, + "content": "[850] Y. Sun, N. Salami Pargoo, P. Jin, and J. Ortiz, \"Optimizing autonomous driving for safety: A human-centric approach with lvm-enhanced rlhf,\" in Companion of the 2024 on ACM International Joint Conference on Pervasive and Ubiquitous Computing, 2024, pp. 76-80." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.157, + 0.492, + 0.212 + ], + "angle": 0, + "content": "[851] R. Fang, R. Bindu, A. Gupta, and D. 
Kang, \"Llm agents can autonomously exploit one-day vulnerabilities,\" arXiv preprint arXiv:2404.08144, vol. 13, p. 14, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.215, + 0.492, + 0.287 + ], + "angle": 0, + "content": "[852] Y. H. Ke, R. Yang, S. A. Lie, T. X. Y. Lim, H. R. Abdullah, D. S. W. Ting, and N. Liu, \"Enhancing diagnostic accuracy through multi-agent conversations: using large language models to mitigate cognitive bias,\" arXiv preprint arXiv:2401.14589, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.288, + 0.492, + 0.344 + ], + "angle": 0, + "content": "[853] X. Mou, Z. Wei, and X. Huang, \"Unveiling the truth and facilitating change: Towards agent-based largescale social movement simulation,\" arXiv preprint arXiv:2402.16333, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.346, + 0.492, + 0.403 + ], + "angle": 0, + "content": "[854] Z. Chen, J. Chen, J. Chen, and M. Sra, \"Position: Standard benchmarks fail-llm agents present overlooked risks for financial applications,\" arXiv preprint arXiv:2502.15865, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.405, + 0.492, + 0.476 + ], + "angle": 0, + "content": "[855] Z. Liu, R. Zeng, D. Wang, G. Peng, J. Wang, Q. Liu, P. Liu, and W. Wang, \"Agents4plc: Automating closed-loop plc code generation and verification in industrial control systems using llm-based agents,\" arXiv preprint arXiv:2410.14209, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.477, + 0.492, + 0.548 + ], + "angle": 0, + "content": "[856] S. Mukherjee, P. Gamble, M. S. Ausin, N. Kant, K. Aggarwal, N. Manjunath, D. Datta, Z. Liu, J. Ding, S. Busacca et al., \"Polaris: A safety-focused llm constellation architecture for healthcare,\" arXiv preprint arXiv:2403.13313, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.55, + 0.492, + 0.606 + ], + "angle": 0, + "content": "[857] L. La Cava and A. 
Tagarelli, \"Safeguarding decentralized social media: Llm agents for automating community rule compliance,\" arXiv preprint arXiv:2409.08963, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.608, + 0.492, + 0.666 + ], + "angle": 0, + "content": "[858] Y. Gan, Y. Yang, Z. Ma, P. He, R. Zeng, Y. Wang, Q. Li, C. Zhou, S. Li, T. Wang et al., \"Navigating the risks: A survey of security, privacy, and ethics threats in lmbased agents,\" arXiv preprint arXiv:2411.09523, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.667, + 0.492, + 0.724 + ], + "angle": 0, + "content": "[859] Z. Deng, Y. Guo, C. Han, W. Ma, J. Xiong, S. Wen, and Y. Xiang, \"Ai agents under threat: A survey of key security challenges and future pathways,\" ACM Computing Surveys, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.725, + 0.492, + 0.781 + ], + "angle": 0, + "content": "[860] R. Ye, S. Tang, R. Ge, Y. Du, Z. Yin, S. Chen, and J. Shao, \"Mas-gpt: Training llms to build llm-based multi-agent systems,\" arXiv preprint arXiv:2503.03686, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.783, + 0.492, + 0.841 + ], + "angle": 0, + "content": "[861] J. Zhang, J. Xiang, Z. Yu, F. Teng, X. Chen, J. Chen, M. Zhuge, X. Cheng, S. Hong, J. Wang et al., \"Aflow: Automating agentic workflow generation,\" arXiv preprint arXiv:2410.10762, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.842, + 0.492, + 0.884 + ], + "angle": 0, + "content": "[862] L. Panait and S. Luke, \"Cooperative multi-agent learning: The state of the art,\" Autonomous agents and multiagent systems, vol. 11, pp. 387-434, 2005." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.886, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[863] L. Hammond, A. Chan, J. Clifton, J. Hoelscher-Obermaier, A. Khan, E. McLean, C. Smith, W. Barfuss, J. Foerster, T. Gavencciak et al., \"Multi-agent risks from advanced ai,\" arXiv preprint arXiv:2502.14143, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.076, + 0.054, + 0.492, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.054, + 0.922, + 0.111 + ], + "angle": 0, + "content": "[864] R. Xu, X. Li, S. Chen, and W. Xu, \"Nuclear deployed: Analyzing catastrophic risks in decision-making of autonomous llm agents,\" arXiv preprint arXiv:2502.11355, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.113, + 0.922, + 0.17 + ], + "angle": 0, + "content": "[865] Z. Zhou, Z. Li, J. Zhang, Y. Zhang, K. Wang, Y. Liu, and Q. Guo, \"Corba: Contagious recursive blocking attacks on multi-agent systems based on large language models,\" arXiv preprint arXiv:2502.14529, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.171, + 0.922, + 0.228 + ], + "angle": 0, + "content": "[866] Z. Tan, C. Zhao, R. Moraffah, Y. Li, Y. Kong, T. Chen, and H. Liu, \"The wolf within: Covert injection of malice into mllm societies via an mllm operative,\" arXiv preprint arXiv:2402.14859, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.229, + 0.922, + 0.287 + ], + "angle": 0, + "content": "[867] M. Yu, S. Wang, G. Zhang, J. Mao, C. Yin, Q. Liu, Q. Wen, K. Wang, and Y. Wang, \"Netsafe: Exploring the topological safety of multi-agent networks,\" arXiv preprint arXiv:2410.15686, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.288, + 0.922, + 0.345 + ], + "angle": 0, + "content": "[868] J.-t. Huang, J. Zhou, T. Jin, X. Zhou, Z. Chen, W. Wang, Y. Yuan, M. Sap, and M. R. Lyu, \"On the resilience of multi-agent systems with malicious agents,\" arXiv preprint arXiv:2408.00989, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.346, + 0.922, + 0.389 + ], + "angle": 0, + "content": "[869] P. He, Y. Lin, S. Dong, H. Xu, Y. Xing, and H. Liu, \"Red-teaming llm multi-agent systems via communication attacks,\" arXiv preprint arXiv:2502.14847, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.39, + 0.922, + 0.432 + ], + "angle": 0, + "content": "[870] Y. Tian, X. Yang, J. Zhang, Y. Dong, and H. Su, \"Evil geniuses: Delving into the safety of llm-based agents,\" arXiv preprint arXiv:2311.11855, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.434, + 0.922, + 0.504 + ], + "angle": 0, + "content": "[871] A. Amayuelas, X. Yang, A. Antoniades, W. Hua, L. Pan, and W. Wang, \"Multiagent collaboration attack: Investigating adversarial attacks in large language model collaborations via debate,\" arXiv preprint arXiv:2406.14711, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.506, + 0.922, + 0.564 + ], + "angle": 0, + "content": "[872] T. Ju, Y. Wang, X. Ma, P. Cheng, H. Zhao, Y. Wang, L. Liu, J. Xie, Z. Zhang, and G. Liu, \"Flooding spread of manipulated knowledge in llm-based multi-agent communities,\" arXiv preprint arXiv:2407.07791, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.565, + 0.922, + 0.607 + ], + "angle": 0, + "content": "[873] G. Lin and Q. Zhao, \"Large language model sentinel: Llm agent for adversarial purification,\" arXiv preprint arXiv:2405.20770, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.608, + 0.922, + 0.651 + ], + "angle": 0, + "content": "[874] Y. Zeng, Y. Wu, X. Zhang, H. Wang, and Q. Wu, \"Autodefense: Multi-agent llm defense against jailbreak attacks,\" arXiv preprint arXiv:2403.04783, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.652, + 0.922, + 0.694 + ], + "angle": 0, + "content": "[875] S. Chern, Z. Fan, and A. Liu, \"Combating adversarial attacks with multi-agent debate,\" arXiv preprint arXiv:2401.05998, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.695, + 0.922, + 0.768 + ], + "angle": 0, + "content": "[876] B. Chen, G. Li, X. Lin, Z. Wang, and J. 
Li, \"Blockagents: Towards byzantine-robust llm-based multi-agent coordination via blockchain,\" in Proceedings of the ACM Turing Award Celebration Conference-China 2024, 2024, pp. 187-192." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.769, + 0.922, + 0.825 + ], + "angle": 0, + "content": "[877] C. Song, L. Ma, J. Zheng, J. Liao, H. Kuang, and L. Yang, \"Audit-llm: Multi-agent collaboration for log-based insider threat detection,\" arXiv preprint arXiv:2408.08902, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.827, + 0.922, + 0.897 + ], + "angle": 0, + "content": "[878] S. Wang, G. Zhang, M. Yu, G. Wan, F. Meng, C. Guo, K. Wang, and Y. Wang, \"G-safeguard: A topology-guided security lens and treatment on llm-based multi-agent systems,\" arXiv preprint arXiv:2502.11127, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.899, + 0.922, + 0.943 + ], + "angle": 0, + "content": "[879] Z. Wu, S. Pan, F. Chen, G. Long, C. Zhang, and S. Y. Philip, \"A comprehensive survey on graph neural networks,\" IEEE transactions on neural networks and" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.922, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.033, + 0.922, + 0.043 + ], + "angle": 0, + "content": "67" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.431, + 0.069 + ], + "angle": 0, + "content": "learning systems, vol. 32, no. 1, pp. 4-24, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.069, + 0.492, + 0.126 + ], + "angle": 0, + "content": "[880] X. Zheng, Y. Wang, Y. Liu, M. Li, M. Zhang, D. Jin, P. S. Yu, and S. Pan, \"Graph neural networks for graphs with heterophily: A survey,\" arXiv preprint arXiv:2202.07082, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.127, + 0.492, + 0.199 + ], + "angle": 0, + "content": "[881] M. R. Genesereth and S. P. Ketchpel, \"The kqml protocol: A specification of language and communication,\" in Proceedings of the Third International Conference on Information and Knowledge Management (CIKM). ACM, 1993, pp. 1-10." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.2, + 0.492, + 0.302 + ], + "angle": 0, + "content": "[882] D. S. Milojicic, M. Breugst, I. Busse, J. Campbell, S. Covaci, B. Friedman, K. Kosaka, D. B. Lange, K. Ono, M. Oshima, C. Tham, S. Virdhagriswaran, and J. White, \"Masif: The omg mobile agent system interoperability facility,\" in Proceedings of the Second International Workshop on Mobile Agents, ser. MA '98. Berlin, Heidelberg: Springer-Verlag, 1998, p. 50-67." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.303, + 0.492, + 0.346 + ], + "angle": 0, + "content": "[883] F. for Intelligent Physical Agents, \"Fipa communicative act library specification,\" https://www.fipa.org/specs/fipa00037/SC00037J.html, 2000." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.346, + 0.492, + 0.389 + ], + "angle": 0, + "content": "[884] F. Curbera, M. Duftler, R. Khalaf, W. Nagy, N. Mukhi, and S. Weerawarana, \"Web services: Why and how,\" IBM Systems Journal, vol. 41, no. 2, pp. 170-177, 2002." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.39, + 0.492, + 0.447 + ], + "angle": 0, + "content": "[885] G. Hohpe and B. Woolf, Enterprise Integration Patterns: Designing, Building, and Deploying Messaging Solutions, ser. Addison-Wesley Signature Series (Fowler). Addison-Wesley Professional, 2006." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.448, + 0.492, + 0.533 + ], + "angle": 0, + "content": "[886] P. Lewis, E. Perez, A. Piktus, F. Petroni, V. Karpukhin, N. Goyal, H. Kuttler, M. Lewis, W.-t. Yih, T. 
Rocktäschel et al., \"Retrieval-augmented generation for knowledge-intensive nlp tasks,\" Advances in neural information processing systems, vol. 33, pp. 9459-9474, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.535, + 0.492, + 0.579 + ], + "angle": 0, + "content": "[887] G. Izacard and E. Grave, \"Towards an efficient pipeline for knowledge-intensive nlp tasks,\" arXiv preprint arXiv:2112.04426, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.58, + 0.492, + 0.623 + ], + "angle": 0, + "content": "[888] H. Chase, \"Langchain: Build applications with llms through composability,\" https://github.com/ langchain-ai/langchain, 2022, accessed: Apr. 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.623, + 0.492, + 0.666 + ], + "angle": 0, + "content": "[889] J. Wu et al., \"Llamaindex: Connecting llms to your knowledge,\" https://github.com/jerryjliu/llama_index, 2023, accessed: Apr. 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.666, + 0.492, + 0.71 + ], + "angle": 0, + "content": "[890] OpenAI, \"Function calling in openerai models,\" https://platform.openai.com/docs/guides/functions, 2023, accessed: Apr. 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.711, + 0.492, + 0.754 + ], + "angle": 0, + "content": "[891] Anthropic, \"Model context protocol,\" 2024, accessed: 2025-04-19. [Online]. Available: https://www.anthropic.com/news/model-context-protocol" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.755, + 0.492, + 0.797 + ], + "angle": 0, + "content": "[892] Google, \"A2a: Agent2agent protocol,\" 2025, accessed: 2025-04-21. [Online]. Available: https://github.com/google/A2A" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.798, + 0.492, + 0.841 + ], + "angle": 0, + "content": "[893] G. Chang, \"Anp: Agent network protocol,\" 2024, accessed: 2025-04-21. [Online]. 
Available: https://www(agent-network-protocol.com/" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.842, + 0.492, + 0.883 + ], + "angle": 0, + "content": "[894] WildCardAI, \"agents.json specification,\" https://github.com/wild-card-ai/agents.json, 2025, accessed: 2025-04-22." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.885, + 0.492, + 0.928 + ], + "angle": 0, + "content": "[895] NEAR, \"Aitp: Agent interaction & transaction protocol,\" 2025, accessed: 2025-04-22. [Online]. Available: https://aitp.dev/" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.929, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[896] L. F. Al and L. Data, \"Acp: Agent communication pro" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.922, + 0.083 + ], + "angle": 0, + "content": "tocol,\" 2025, accessed: 2025-04-22. [Online]. Available: https://github.com/orgs/i-am-bee/discussions/284" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.084, + 0.922, + 0.127 + ], + "angle": 0, + "content": "[897] G. Cisco, Langchain, \"Acp: Agent connect protocol,\" 2025, accessed: 2025-04-22. [Online]. Available: https://spec.acp.agntcy.org/" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.128, + 0.922, + 0.199 + ], + "angle": 0, + "content": "[898] S. Marro, E. L. Malfa, J. Wright, G. Li, N. Shadbolt, M. Wooldridge, and P. Torr, \"A scalable communication protocol for networks of large language models,\" 2024. [Online]. Available: https://arxiv.org/abs/2410.11905" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.2, + 0.922, + 0.242 + ], + "angle": 0, + "content": "[899] Eclipse, \"Language model operating system (lmos),\" https://eclipse.dev/lmos/, 2025, accessed: 2025-04-22." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.243, + 0.922, + 0.273 + ], + "angle": 0, + "content": "[900] AlEngineerFoundation, \"Agent protocol,\" https://agentprotocol.ai/, 2025, accessed: 2025-04-22." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.273, + 0.922, + 0.331 + ], + "angle": 0, + "content": "[901] R. Ranjan, S. Gupta, and S. N. Singh, \"Loka protocol: A decentralized framework for trustworthy and ethical ai agent ecosystems,\" 2025. [Online]. Available: https://arxiv.org/abs/2504.10915" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.332, + 0.922, + 0.389 + ], + "angle": 0, + "content": "[902] A. Srinivasan, K. Bania, S. V, H. Mestha, and S. Liu, \"Implementation and application of an intelligibility protocol for interaction with an llm,\" 2024. [Online]. Available: https://arxiv.org/abs/2410.20600" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.39, + 0.922, + 0.433 + ], + "angle": 0, + "content": "[903] I. Bae, J. Lee, and H.-G. Jeon, \"Continuous locomotive crowd behavior generation,\" 2025. [Online]. Available: https://arxiv.org/abs/2504.04756" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.434, + 0.922, + 0.506 + ], + "angle": 0, + "content": "[904] L. Gąsieniec, Łukasz Kuszner, E. Latif, R. Parasuraman, P. Spirakis, and G. Stachowiak, \"Anonymous distributed localisation via spatial population protocols,\" 2024. [Online]. Available: https://arxiv.org/abs/2411.08434" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.507, + 0.922, + 0.577 + ], + "angle": 0, + "content": "[905] J. Tu, T. Wang, J. Wang, S. Manivasagam, M. Ren, and R. Urtasun, \"Adversarial attacks on multi-agent communication,\" in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2021, pp. 7768-7777." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.579, + 0.922, + 0.651 + ], + "angle": 0, + "content": "[906] L. Yuan, F. Chen, Z. Zhang, and Y. 
Yu, \"Communication-robust multi-agent learning by adaptable auxiliary multi-agent adversary generation,\" Frontiers of Computer Science, vol. 18, no. 6, p. 186331, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.652, + 0.922, + 0.71 + ], + "angle": 0, + "content": "[907] J. Blumenkamp and A. Prorok, \"The emergence of adversarial communication in multi-agent reinforcement learning,\" in Conference on Robot Learning. PMLR, 2021, pp. 1394-1414." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.711, + 0.922, + 0.768 + ], + "angle": 0, + "content": "[908] Z. Chen, Z. Xiang, C. Xiao, D. Song, and B. Li, \"Agent-poison: Red-teaming llm agents via poisoning memory or knowledge bases,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.769, + 0.922, + 0.812 + ], + "angle": 0, + "content": "[909] X. Pan, J. Dai, Y. Fan, and M. Yang, \"Frontier ai systems have surpassed the self-replicating red line,\" arXiv preprint arXiv:2412.12140, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.813, + 0.922, + 0.885 + ], + "angle": 0, + "content": "[910] L. Yu, Y. Qiu, Q. Yao, Y. Shen, X. Zhang, and J. Wang, \"Robust communicative multi-agent reinforcement learning with active defense,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 16, 2024, pp. 17575-17582." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.886, + 0.922, + 0.928 + ], + "angle": 0, + "content": "[911] J. Light, M. Cai, S. Shen, and Z. Hu, \"Avalonbench: Evaluating llms playing the game of avalon,\" arXiv preprint arXiv:2310.05036, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.929, + 0.922, + 0.944 + ], + "angle": 0, + "content": "[912] Q. Xie, Q. Feng, T. Zhang, Q. Li, L. Yang, Y. 
Zhang," + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.922, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "68" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.054, + 0.49, + 0.098 + ], + "angle": 0, + "content": "R. Feng, L. He, S. Gao, and Y. Zhang, \"Human simulacra: Benchmarking the personification of large language models,\" arXiv preprint arXiv:2402.18180, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.098, + 0.492, + 0.142 + ], + "angle": 0, + "content": "[913] L. Geng and E. Y. Chang, \"Realm-bench: A real-world planning benchmark for llms and multi-agent systems,\" arXiv preprint arXiv:2502.18836, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.142, + 0.492, + 0.199 + ], + "angle": 0, + "content": "[914] Y. Dubois, B. Galambosi, P. Liang, and T. B. Hashimoto, \"Length-controlled alpacaeval: A simple way to debias automatic evaluators,\" arXiv preprint arXiv:2404.04475, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.2, + 0.492, + 0.272 + ], + "angle": 0, + "content": "[915] W. Wang, J. Shi, C. Wang, C. Lee, Y. Yuan, J.-T. Huang, and M. R. Lyu, \"Learning to ask: When llms meet unclear instruction,\" ArXiv, vol. abs/2409.00557, 2024. [Online]. Available: https://api-semanticscholar.org/CorpusID:272368496" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.273, + 0.492, + 0.345 + ], + "angle": 0, + "content": "[916] C. Guo, X. Liu, C. Xie, A. Zhou, Y. Zeng, Z. Lin, D. Song, and B. Li, \"Redcode: Risky code execution and generation benchmark for code agents,\" Advances in Neural Information Processing Systems, vol. 37, pp. 106-190-106-236, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.346, + 0.492, + 0.417 + ], + "angle": 0, + "content": "[917] X. Yuan, J. Li, D. Wang, Y. Chen, X. Mao, L. Huang, H. Xue, W. Wang, K. Ren, and J. Wang, \"S-eval: Automatic and adaptive test generation for benchmarking safety evaluation of large language models,\" arXiv preprint arXiv:2405.14191, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.418, + 0.492, + 0.476 + ], + "angle": 0, + "content": "[918] D. Dorn, A. Variengien, C.-R. Segerie, and V. Corruble, \"Bells: A framework towards future proof benchmarks for the evaluation of llm safeguards,\" arXiv preprint arXiv:2406.01364, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.477, + 0.492, + 0.534 + ], + "angle": 0, + "content": "[919] Y. Shao, T. Li, W. Shi, Y. Liu, and D. Yang, \"Privacylens: Evaluating privacy norm awareness of language models in action,\" arXiv preprint arXiv:2409.00138, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.535, + 0.492, + 0.592 + ], + "angle": 0, + "content": "[920] Q. Zhan, Z. Liang, Z. Ying, and D. Kang, \"Injecagent: Benchmarking indirect prompt injections in tool-integrated large language model agents,\" arXiv preprint arXiv:2403.02691, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.593, + 0.492, + 0.664 + ], + "angle": 0, + "content": "[921] Z. Zhu, B. Wu, Z. Zhang, and B. Wu, \"Riskawarebench: Towards evaluating physical risk awareness for high-level planning of llm-based embodied agents,\" arXiv e-prints, pp. arXiv-2408, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.665, + 0.492, + 0.71 + ], + "angle": 0, + "content": "[922] Z. Zhang, S. Cui, Y. Lu, J. Zhou, J. Yang, H. Wang, and M. Huang, \"Agent-safetybench: Evaluating the safety of llm agents,\" arXiv preprint arXiv:2412.14470, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.71, + 0.492, + 0.782 + ], + "angle": 0, + "content": "[923] M. Andriushchenko, A. Souly, M. 
Dziemian, D. Duenas, M. Lin, J. Wang, D. Hendrycks, A. Zou, Z. Kolter, M. Fredrikson et al., \"Agentharm: A benchmark for measuring harmfulness of llm agents,\" arXiv preprint arXiv:2410.09024, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.783, + 0.492, + 0.841 + ], + "angle": 0, + "content": "[924] J. Ye, S. Li, G. Li, C. Huang, S. Gao, Y. Wu, Q. Zhang, T. Gui, and X. Huang, \"Toolsword: Unveiling safety issues of large language models in tool learning across three stages,\" arXiv preprint arXiv:2402.10753, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.841, + 0.492, + 0.899 + ], + "angle": 0, + "content": "[925] Y. Ruan, H. Dong, A. Wang, S. Pitis, Y. Zhou, J. Ba, Y. Dubois, C. J. Maddison, and T. Hashimoto, \"Identifying the risks of lm agents with an lm-emulated sandbox,\" arXiv preprint arXiv:2309.15817, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.9, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[926] X. Zhou, H. Kim, F. Brahman, L. Jiang, H. Zhu, X. Lu, F. Xu, B. Y. Lin, Y. Choi, N. Mireshghallah et al., \"Haicosystem: An ecosystem for sandboxing" + }, + { + "type": "list", + "bbox": [ + 0.076, + 0.054, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.922, + 0.083 + ], + "angle": 0, + "content": "safety risks in human-ai interactions,\" arXiv preprint arXiv:2409.16427, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.083, + 0.922, + 0.142 + ], + "angle": 0, + "content": "[927] S. Yin, X. Pang, Y. Ding, M. Chen, Y. Bi, Y. Xiong, W. Huang, Z. Xiang, J. Shao, and S. Chen, \"Safeagent-bench: A benchmark for safe task planning of embodied llm agents,\" arXiv preprint arXiv:2412.13178, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.142, + 0.922, + 0.185 + ], + "angle": 0, + "content": "[928] J. 
BENCHMARK, \"Jailjudge: A comprehensive jailbreak judge benchmark with multi-agent enhanced explanation evaluation framework.\"" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.185, + 0.922, + 0.242 + ], + "angle": 0, + "content": "[929] P. Y. Zhong, S. Chen, R. Wang, M. McCall, B. L. Titzer, and H. Miller, \"Rtbas: Defending llm agents against prompt injection and privacy leakage,\" arXiv preprint arXiv:2502.08966, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.243, + 0.922, + 0.316 + ], + "angle": 0, + "content": "[930] A. Liu, Y. Zhou, X. Liu, T. Zhang, S. Liang, J. Wang, Y. Pu, T. Li, J. Zhang, W. Zhou et al., \"Compromising lvm driven embodied agents with contextual backdoor attacks,\" IEEE Transactions on Information Forensics and Security, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.317, + 0.922, + 0.359 + ], + "angle": 0, + "content": "[931] —, \"Compromising embodied agents with contextual backdoor attacks,\" arXiv preprint arXiv:2408.02882, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.36, + 0.922, + 0.417 + ], + "angle": 0, + "content": "[932] H. Zhang, C. Zhu, X. Wang, Z. Zhou, S. Hu, and L. Y. Zhang, \"Badrobot: Jailbreaking llm-based embodied ai in the physical world,\" arXiv preprint arXiv:2407.20242, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.418, + 0.922, + 0.476 + ], + "angle": 0, + "content": "[933] W. Shen, C. Li, H. Chen, M. Yan, X. Quan, H. Chen, J. Zhang, and F. Huang, \"Small llms are weak tool learners: A multi-llm agent,\" arXiv preprint arXiv:2401.07324, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.476, + 0.922, + 0.534 + ], + "angle": 0, + "content": "[934] S. Yuan, K. Song, J. Chen, X. Tan, Y. Shen, R. Kan, D. Li, and D. Yang, \"Easytool: Enhancing llm-based agents with concise tool instruction,\" arXiv preprint arXiv:2401.06201, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.535, + 0.922, + 0.607 + ], + "angle": 0, + "content": "[935] S. Wu, S. Zhao, Q. Huang, K. Huang, M. Yasunaga, K. Cao, V. Ioannidis, K. Subbian, J. Leskovec, and J. Y. Zou, \"Avatar: Optimizing llm agents for tool usage via contrastive reasoning,\" Advances in Neural Information Processing Systems, vol. 37, pp. 25981-26010, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.608, + 0.922, + 0.637 + ], + "angle": 0, + "content": "[936] Z. Shen, \"Llm with tools: A survey,\" arXiv preprint arXiv:2409.18807, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.637, + 0.922, + 0.695 + ], + "angle": 0, + "content": "[937] C. Qian, W. Liu, H. Liu, N. Chen, Y. Dang, J. Li, C. Yang, W. Chen, Y. Su, X. Cong et al., \"Chatdev: Communicative agents for software development,\" arXiv preprint arXiv:2307.07924, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.695, + 0.922, + 0.767 + ], + "angle": 0, + "content": "[938] Z. M. Wang, Z. Peng, H. Que, J. Liu, W. Zhou, Y. Wu, H. Guo, R. Gan, Z. Ni, J. Yang et al., \"Rolellm: Benchmarking, eliciting, and enhancing role-playing abilities of large language models,\" arXiv preprint arXiv:2310.00746, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.768, + 0.922, + 0.84 + ], + "angle": 0, + "content": "[939] J. Zhou, Z. Chen, D. Wan, B. Wen, Y. Song, J. Yu, Y. Huang, L. Peng, J. Yang, X. Xiao et al., \"Characterglm: Customizing chinese conversational ai characters with large language models,\" arXiv preprint arXiv:2311.16832, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.841, + 0.922, + 0.899 + ], + "angle": 0, + "content": "[940] Z. Chen, K. Liu, Q. Wang, W. Zhang, J. Liu, D. Lin, K. Chen, and F. Zhao, \"Agent-flan: Designing data and methods of effective agent tuning for large language models,\" arXiv preprint arXiv:2403.12881, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.899, + 0.922, + 0.944 + ], + "angle": 0, + "content": "[941] G. Zhang, L. Niu, J. Fang, K. Wang, L. Bai, and X. Wang, \"Multi-agent architecture search via agentic supernet,\" arXiv preprint arXiv:2502.04180, 2025." + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.922, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.422, + 0.045 + ], + "angle": 0, + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + }, + { + "type": "page_number", + "bbox": [ + 0.907, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "69" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.098 + ], + "angle": 0, + "content": "[942] L. P. Kaelbling, M. L. Littman, and A. W. Moore, \"Reinforcement learning: A survey,\" Journal of artificial intelligence research, vol. 4, pp. 237-285, 1996." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.098, + 0.492, + 0.127 + ], + "angle": 0, + "content": "[943] Y. Li, \"Deep reinforcement learning: An overview,\" arXiv preprint arXiv:1701.07274, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.127, + 0.492, + 0.17 + ], + "angle": 0, + "content": "[944] X. Li, Y. Fan, and S. Cheng, \"Aigc in china: Current developments and future outlook,\" arXiv preprint arXiv:2308.08451, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.17, + 0.492, + 0.227 + ], + "angle": 0, + "content": "[945] X. Sun, L. Dong, X. Li, Z. Wan, S. Wang, T. Zhang, J. Li, F. Cheng, L. Lyu, F. Wu et al., \"Pushing the limits of chatgpt on nlp tasks,\" arXiv preprint arXiv:2306.09719, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.228, + 0.492, + 0.302 + ], + "angle": 0, + "content": "[946] G. Sriramanan, S. Bharti, V. S. Sadasivan, S. Saha, P. Kattakinda, and S. 
Feizi, \"Llm-check: Investigating detection of hallucinations in large language models,\" Advances in Neural Information Processing Systems, vol. 37, pp. 34188-34216, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.302, + 0.492, + 0.375 + ], + "angle": 0, + "content": "[947] K. Zheng, J. Chen, Y. Yan, X. Zou, and X. Hu, \"Reefknot: A comprehensive benchmark for relation hallucination evaluation, analysis and mitigation in multimodal large language models,\" arXiv preprint arXiv:2408.09429, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.375, + 0.492, + 0.447 + ], + "angle": 0, + "content": "[948] X. Zou, Y. Wang, Y. Yan, S. Huang, K. Zheng, J. Chen, C. Tang, and X. Hu, \"Look twice before you answer: Memory-space visual retracing for hallucination mitigation in multimodal large language models,\" arXiv preprint arXiv:2410.03577, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.447, + 0.492, + 0.52 + ], + "angle": 0, + "content": "[949] G. Zhou, Y. Yan, X. Zou, K. Wang, A. Liu, and X. Hu, \"Mitigating modality prior-induced hallucinations in multimodal large language models via deciphering attention causality,\" arXiv preprint arXiv:2410.04780, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.521, + 0.492, + 0.594 + ], + "angle": 0, + "content": "[950] W. Wang, Z. Ma, Z. Wang, C. Wu, W. Chen, X. Li, and Y. Yuan, \"A survey of llm-based agents in medicine: How far are we from baymax?\" ArXiv, vol. abs/2502.11211, 2025. [Online]. Available: https://api.sementicscholar.org/CorpusID:276408182" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.594, + 0.492, + 0.637 + ], + "angle": 0, + "content": "[951] H. Kang and X.-Y. Liu, \"Deficiency of large language models in finance: An empirical examination of hallucination,\" arXiv preprint arXiv:2311.15548, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.637, + 0.492, + 0.738 + ], + "angle": 0, + "content": "[952] L. Ouyang, J. Wu, X. Jiang, D. 
Almeida, C. L. Wainwright, P. Mishkin, C. Zhang, S. Agarwal, K. Slama, A. Ray, J. Schulman, J. Hilton, F. Kelton, L. Miller, M. Simens, A. Askell, P. Welinder, P. F. Christiano, J. Leike, and R. Lowe, \"Training language models to follow instructions with human feedback,\" in NeurIPS, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.739, + 0.492, + 0.798 + ], + "angle": 0, + "content": "[953] Y. Liu, Y. Yao, J.-F. Ton, X. Zhang, R. Guo, H. Cheng, Y. Klochkov, M. F. Taufiq, and H. Li, \"Trustworthy llms: a survey and guideline for evaluating large language models' alignment,\" 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.798, + 0.492, + 0.855 + ], + "angle": 0, + "content": "[954] M. Hao, H. Li, H. Chen, P. Xing, G. Xu, and T. Zhang, \"Iron: Private inference on transformers,\" Advances in neural information processing systems, vol. 35, pp. 15718-15731, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.855, + 0.492, + 0.929 + ], + "angle": 0, + "content": "[955] J. Huang, J.-T. Huang, Z. Liu, X. Liu, W. Wang, and J. Zhao, \"Vlms as geoguessr masters: Exceptional performance, hidden biases, and privacy risks,\" ArXiv, vol. abs/2502.11163, 2025. [Online]. Available: https://api.sementicscholar.org/CorpusID:276409319" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.929, + 0.492, + 0.944 + ], + "angle": 0, + "content": "[956] G. Feretzakis and V. S. Verykios, \"Trustworthy ai:" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.054, + 0.492, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.549, + 0.054, + 0.923, + 0.083 + ], + "angle": 0, + "content": "Securing sensitive data in large language models,\" AI, vol. 5, no. 4, pp. 2773-2800, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.083, + 0.923, + 0.142 + ], + "angle": 0, + "content": "[957] Q. Feng, S. R. Kasa, H. Yun, C. H. Teo, and S. B. 
Bodapati, \"Exposing privacy gaps: Membership inference attack on preference data for llm alignment,\" arXiv preprint arXiv:2407.06443, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.142, + 0.923, + 0.185 + ], + "angle": 0, + "content": "[958] N. Rahman and E. Santacana, “Beyond fair use: Legal risk evaluation for training llms on copyrighted text,” in ICML Workshop on Generative AI and Law, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.185, + 0.923, + 0.258 + ], + "angle": 0, + "content": "[959] J. Guo, Y. Li, R. Chen, Y. Wu, C. Liu, Y. Chen, and H. Huang, \"Towards copyright protection for knowledge bases of retrieval-augmented language models via ownership verification with reasoning,\" arXiv preprint arXiv:2502.10440, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.259, + 0.923, + 0.317 + ], + "angle": 0, + "content": "[960] S. Shao, Y. Li, H. Yao, Y. He, Z. Qin, and K. Ren, \"Explanation as a watermark: Towards harmless and multi-bit model ownership verification via watermarking feature attribution,\" in NDSS, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.317, + 0.923, + 0.36 + ], + "angle": 0, + "content": "[961] W. Xu, K. Gao, H. He, and M. Zhou, \"Licoeval: Evaluating llms on license compliance in code generation,\" arXiv preprint arXiv:2408.02487, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.36, + 0.923, + 0.418 + ], + "angle": 0, + "content": "[962] W. Qu, W. Zheng, T. Tao, D. Yin, Y. Jiang, Z. Tian, W. Zou, J. Jia, and J. Zhang, \"Provably robust multi-bit watermarking for ai-generated text,\" arXiv preprint arXiv:2401.16820, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.418, + 0.923, + 0.477 + ], + "angle": 0, + "content": "[963] J. Kirchenbauer, J. Geiping, Y. Wen, J. Katz, I. Miers, and T. Goldstein, \"A watermark for large language models,\" in International Conference on Machine Learning. PMLR, 2023, pp. 17061-17084." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.477, + 0.923, + 0.536 + ], + "angle": 0, + "content": "[964] J. Ye, Y. Wang, Y. Huang, D. Chen, Q. Zhang, N. Moniz, T. Gao, W. Geyer, C. Huang, P.-Y. Chen et al., \"Justice or prejudice? quantifying biases in llm-as-a-judge,\" arXiv preprint arXiv:2410.02736, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.535, + 0.923, + 0.637 + ], + "angle": 0, + "content": "[965] Y. Wan, W. Wang, P. He, J. Gu, H. Bai, and M. R. Lyu, \"Biasaker: Measuring the bias in conversational ai system,\" Proceedings of the 31st ACM Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering, 2023. [Online]. Available: https://api-semanticscholar.org/CorpusID:258833296" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.637, + 0.923, + 0.681 + ], + "angle": 0, + "content": "[966] European Union, \"Artificial intelligence act,\" 2024, accessed: 2025-03-07. [Online]. Available: https://artificialintelligenceact.eu/" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.681, + 0.923, + 0.753 + ], + "angle": 0, + "content": "[967] Cyberspace Administration of China, \"Interim measures for the management of generative artificial intelligence services,\" 2023, accessed: 2025-03-07. [Online]. Available: https://www.cac.gov.cn/2023-07/13/c_1690898327029107.htm" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.753, + 0.923, + 0.798 + ], + "angle": 0, + "content": "[968] The White House, \"Safe, secure, and trustworthy development and use of artificial intelligence,\" 2023, accessed: 2025-03-07." 
+ }, + { + "type": "list", + "bbox": [ + 0.508, + 0.054, + 0.923, + 0.798 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15585/c8803fe7-d918-414b-a5e2-cfaba643acbf_origin.pdf b/data/2025/2504_15xxx/2504.15585/c8803fe7-d918-414b-a5e2-cfaba643acbf_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e0630cdb580c36685c7455e7e8099078d9076055 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/c8803fe7-d918-414b-a5e2-cfaba643acbf_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22c899078b7fd1863df1dbb37bc93d5bdc09f82b02e6b451deccdecd6e7cfed0 +size 11965246 diff --git a/data/2025/2504_15xxx/2504.15585/full.md b/data/2025/2504_15xxx/2504.15585/full.md new file mode 100644 index 0000000000000000000000000000000000000000..d4ba852f6b823d8e02672f0afa23ae9a1771296e --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/full.md @@ -0,0 +1,1976 @@ +# A Comprehensive Survey in LLM(-Agent) Full Stack Safety: Data, Training and Deployment + +Kun Wang\*1,2, Guibin Zhang\*3, Zhenhong Zhou†4, Jiahao Wu†5,6, Miao Yu7, Shiqian Zhao1, Chenlong Yin8, Jinhu Fu9, Yibo Yan10,11, Hanjun Luo12, Liang Lin13, Zhihao Xu14, Haolang Lu1, Xinye Cao1, Xinyun Zhou1, Weifei Jin1, Fanci Meng7, Shicheng Xu15, Junyuan Mao3, Yu Wang16, Hao Wu17, Minghe Wang12, Fan Zhang18, Junfeng Fang3, Wenjie Qu3, Yue Liu3, Chengwei Liu1, Yifan Zhang19, Qiankun Li7, Chongye Guo20,21, Yalan Qin20,21, Zhaoxin Fan22, Kai Wang3, Yi Ding1, Donghai Hong23, Jiaming Ji23, Yingxin Lai24, Zitong Yu24, Xinfeng Li1, Yifan Jiang25, Yanhui Li12, Xinyu Deng12, Junlin Wu12, Dongxia Wang12, Yihao Huang1, Yufei Guo23, Jen-tse Huang26, Qiufeng Wang27, Xiaolong Jin45, Wenxuan Wang14, Dongrui Liu21, Yanwei Yue23, Wenke Huang29, Guancheng Wan30, Heng Chang46, Tianlin Li1, Yi Yu1, Chenghao Li31, Jiawei Li33, Lei Bai21, Jie Zhang4, Qing Guo4, Jingyi Wang12, Tianlong Chen32, Joey Tianyi Zhou4, Xiaojun Jia1, Weisong Sun1, 
Cong Wu34, Jing Chen29, Xuming Hu10,11, Yiming Li1, Xiao Wang35, Ningyu Zhang12, Luu Anh Tuan1, Guowen Xu31, Jiaheng Zhang3, Tianwei Zhang1, Xingjun Ma37, Jindong Gu38, Liang Pang15, Xiang Wang7, Bo An1, Jun Sun36, Mohit Bansal32, Shirui Pan28, Lingjuan Lyu40, Yuval Elovici41, Bhavya Kailkhura42, Yaodong Yang23, Hongwei Li31, Wenyuan Xu12, Yizhou Sun30, Wei Wang30, Qing Li5, Ke Tang6, Yu-Gang Jiang37, Felix Juefei-Xu43, Hui Xiong10,11, Xiaofeng Wang46, Dacheng Tao1, Philip S. Yu44, Qingsong Wen2, Yang Liu1 + +$^{1}$ Nanyang Technological University, $^{2}$ Squirrel AI Learning, $^{3}$ National University of Singapore, $^{4}$ A*STAR, $^{5}$ The Hong Kong Polytechnic University, $^{6}$ Southern University of Science and Technology, $^{7}$ University of Science and Technology of China, $^{8}$ The Pennsylvania State University, $^{9}$ TeleAI, $^{10}$ Hong Kong University of Science and Technology (Guangzhou), $^{11}$ Hong Kong University of Science and Technology, $^{12}$ Zhejiang University, $^{13}$ Institute of Information Engineering, Chinese Academy of Sciences, $^{14}$ Renmin University of China, $^{15}$ Institute of Computing Technology, Chinese Academy of Sciences, $^{16}$ University of California, San Diego, $^{17}$ Tencent, $^{18}$ Georgia Institute of Technology, $^{19}$ Institute of Automation, Chinese Academy of Sciences, $^{20}$ Shanghai University, $^{21}$ Shanghai AI Laboratory, $^{22}$ Beihang University, $^{23}$ Peking University, $^{24}$ Great Bay University, $^{25}$ University of Southern California, $^{26}$ Johns Hopkins University, $^{27}$ Southeast University, $^{28}$ Griffith University, $^{29}$ Wuhan University, $^{30}$ University of California, Los Angeles, $^{31}$ University of Electronic Science and Technology of China, $^{32}$ The University of North Carolina at Chapel Hill, $^{33}$ Tsinghua University, $^{34}$ The University of Hong Kong, $^{35}$ University of Washington, $^{36}$ Singapore Management University, $^{37}$ Fudan University, 
$^{38}$ University of Oxford, $^{39}$ New York University, $^{40}$ Sony, $^{41}$ Ben Gurion University, $^{42}$ Lawrence Livermore National Laboratory, $^{43}$ New York University, $^{44}$ University of Illinois at Chicago, $^{45}$ Purdue University, $^{46}$ ACM Member + +Abstract—The remarkable success of Large Language Models (LLMs) has illuminated a promising pathway toward achieving Artificial General Intelligence for both academic and industrial communities, owing to their unprecedented performance across various applications. As LLMs continue to gain prominence in both research and commercial domains, their security and safety implications have become a growing concern, not only for researchers and corporations but also for all nations. Currently, existing surveys on LLM safety primarily focus on specific stages of the LLM lifecycle, e.g., deployment phase or fine-tuning phase, lacking a comprehensive understanding of the entire "lifechain" of LLMs. To address this gap, this paper introduces, for the first time, the concept of "full-stack" safety to systematically consider safety issues throughout the entire process of data, training (pre-training, post-training), deployment (deployment and final commercialization). Compared to the off-the-shelf LLM safety surveys, our work demonstrates several distinctive advantages: (I) Comprehensive Perspective. We define the complete LLM lifecycle as encompassing data preparation, pre-training, post-training (including alignment and fine-tuning, model editing, etc.), deployment and final commercialization. To our knowledge, this represents the first safety survey to encompass the entire lifecycle of LLMs. (II) Extensive Literature Support. Our research is grounded in an exhaustive review of over $900+$ papers, ensuring comprehensive coverage and systematic organization of safety issues within a more holistic understanding. (III) Unique Insights. 
Through systematic literature analysis, we develop reliable roadmaps and perspectives for each chapter. Our work identifies promising research directions, including safety in data generation, alignment techniques, model editing, and LLM-based agent systems. These insights provide valuable guidance for researchers pursuing future work in this field. We provide an up-to-date review of the literature on LLM (agent) safety at https://github.com/bingreeky/full-stack-llm-safety, which can be considered a useful support for both researchers and engineers. + +# 1 INTRODUCTION + +The emergence and success of large language models (LLMs) [1, 2, 3, 4, 5] have greatly transformed the modes of production in both academia and industry [6, 7, 8, 9, 10, 11, 12, 13], opening a potential path for the upcoming artificial general intelligence [14, 15, 16]. Going beyond this, LLMs, by integrating tools [17, 18, 19, 20], memory [21, 22, 23, 24], APIs [25, 26], and by constructing single-agent or multiagent systems with other LLMs, provide powerful tools for large models to perceive, understand, and change the environment [27, 28, 29, 30]. This has garnered considerable attention for embodied intelligence [31, 32]. + +Unfortunately, the entire lifecycle of LLMs is constantly confronted with security and safety issues [33, 34, 35, 36, 37]. During the data preparation phase, since LLMs require ample and diverse data, and a significant amount of data is sourced from the Internet and other open-source scenarios, the toxicity in the data and user privacy may seep into the model parameters, triggering crises in the model [38, 39, 40]. The pretraining process of the model, due to its unsupervised nature, unconsciously absorbs these toxic data and privacy information, thereby causing the model's "genetic makeup" to carry dangerous characteristics and privacy issues [41, 42, 43, 44]. 
+ +Before the model is deployed, if it is not properly aligned with security measures, it can easily deviate from human values [45, 46]. Meanwhile, to make the model more "specialized," the fine-tuning process will employ safer and more customized data to ensure the model performs flawlessly in specific domains [47, 48, 49, 50]. The model deployment process also involves issues such as jailbreak attacks and corresponding defense measures [51, 52, 53], especially for LLM-based agents [54]. These agents may become contaminated due to their interaction with tools, memory, and the environment [55, 56, 57, 58]. + +Previous surveys on LLMs have primarily focused on the research aspects of LLM itself, often overlooking detailed discussions on LLM safety [7, 34] and in-depth exploration of trustworthiness issues [75]. Meanwhile, off-the-shelf surveys that do address LLM safety tend to concentrate on various trustworthiness concerns or are limited to a single phase of the LLM lifecycle [33, 76, 77], such as the deployment stage and fine-tuning stage. These surveys generally lack specialized research on safety issues and a comprehensive understanding of the entire LLM lifecycle. Table 1 summarizes the differences between our survey and previous surveys. Upon reviewing the aforementioned survey and systematically investigating the related literature, we conclude that our survey endeavors to address several questions that existing surveys have not covered: + +TABLE 1: Survey Comparison on LLMs and Agents settings. + +
SurveyObjectStage*
\( LLM^‡ \)\( Agent^§ \)DataPTEditFTDepEval
Year 2023
Zhao et al. [6]S+M-X
Liang et al. [59]M-XX
Chang et al. [7]S+M-XXXX
Zhang et al. [60]S+M-XXX
Wang et al. [28]-SXXXX
Zhao et al. [61]S-XXX
Xi et al. [29]-S+MASXXXX
Shen et al. [62]S-XXX
Raijan et al. [63]S-XXXX
Kalyan et al. [64]S+M-XX
Huang et al. [51]S-XXX
Shayegani et al. [65]S+MMASXXXX
Yao et al. [66]S-XXXX
Year 2024
Guo et al. [27]-S+MASXXXX
Qin et al. [67]S+M-XX
Hadi et al. [68]S-XXX
Sun et al. [69]S+MSXXX
Das et al. [70]S-XXXX
He et al. [71]-S+M+MASXXXXX
Wang et al. [54]-S+MASXXXXX
Year 2025
Tie et al. [72]S+M-XXX
Ma et al. [33]S+MS+MXX
Huang et al. [73]S+MS+MXX
Yu et al. [74]SS+MASXXXX
Chen et al. [36]S-XX
OursS+MS+M+MAS
+ +$\ddagger$ : Single-modal LLM (S), Multi-modal LLM (M).
+$\S$ : Single-modal Agent (S), Multi-modal Agent (M), Multi-agent System (MAS).
+$\star$ : Pre-training (PT), Fine-tuning (FT), Deployment (Dep), Evaluation (Eval).
+ +![](images/d343f41a4fec2cbd537ab3a55e973848372f25236d86cec814430e4e0878d83c.jpg)
+ +# What aspects should the safety of large models encompass?
+ +Contribution 1. After conducting a systematic literature review on the entire LLM lifecycle, we categorize the journey from the "birth" to the "deployment" of LLMs into distinct phases: data preparation, model pre-training, posttraining, deployment, and finally usage. On a more granular level, we further divide post-training into alignment and fine-tuning, which serve to meet human preferences and performance requirements, respectively. Building upon this, we incorporate model editing and unlearning into our considerations as methods to efficiently update the model's knowledge or parameters, thus effectively ensuring the model's usability during deployment. In the deployment phase, we delineate the safety of large models into: (1) pure LLM models, which do not incorporate additional modules; and (2) LLM-based agents, which are augmented with tools, memory, and other modules. This framework encompasses the entire cycle of model parameter training, convergence, and solidification.
+ +![](images/27b4f09fd68aecd75ec6c15b4737e73a3198fcfd3d6788e2cf6e5147233143f5.jpg)
+ +# How to provide a clearer taxonomy and literature review?
+ +Contribution 2. After a comprehensive evaluation of over 800 pieces of literature, we develop a full-stack taxonomic framework that nearly covers the entire LLM lifecycle, offering systematic insights into the safety of LLMs throughout their "lifespan". 
We provide a more reliable + +correlation analysis between each phase of the LLM timeline and other relevant sections, aiding readers in understanding the safety issues of LLMs while also clarifying the research stage of each LLM phase. + +![](images/869c97d2b966b4966a92a247de0ac1218fd4faaff7252718c47a2b6aff524844.jpg) + +# What are the potential growth areas for future LLM safety concerns? + +Contribution 3. Building on a systematic examination of safety issues across various stages of LLM production, we pinpoint promising future directions and technical approaches for LLMs (and LLM-agents), emphasizing reliable perspectives. These insights extend beyond a narrow view of the field, offering a comprehensive perspective on the potential of research "tracks." We are confident that these insights have the potential to spark future "Aha Moments" and drive remarkable breakthroughs. + +Taxonomy. Our article begins with the structural preparation of data. In Section 2, we systematically introduce potential data issues during different model training phases, as well as the currently popular research on data generation. In Section 3, we focus on the security and safety concerns during the pre-training phase, which includes two core modules: data filtering and augmenting. In Section 4, we concentrate on the post-training phase, differing from previous works by incorporating fine-tuning and alignment, which involve attack, defense, and evaluation. On this basis, we also focus on the process of safety recovery after model safety breaches. In Section 5, we observe that models require dynamic updates in real-world scenarios. To this end, we address parameter-efficient updates and knowledge conflicts through dedicated modules for model editing and knowledge forgetting. Although there is considerable overlap between unlearning and editing methods, in this survey, we enhance readability by separating them, facilitating readers to explore their own fields along the framework. 
Subsequently, in Section 6, we focus on the safety issues after the model parameters are solidified, which share many commonalities with traditional large model security surveys. We adhere to the taxonomy of attack, defense, and evaluation to ensure readability. Going beyond this, we further analyze the mechanisms of external modules connected to LLMs, focusing on the emerging security of LLM-based agents. Finally, in Section 7, we present multiple safety concerns for the commercialization and ethical guidelines, as well as user usage, of LLM-based applications. To provide readers with a comprehensive understanding of our research framework, we dedicate Section 8 to outlining promising future research directions, while Section 9 presents synthesized conclusions and broader implications. + +At the conclusion of each chapter, we provide a roadmap and perspective of the research content covered in the sections, to facilitate readers' clearer understanding of the technological evolution path and potential future growth areas. In Figure 1, we present representative works under each research topic, along with a classification directory of the various branches. Our safety survey not only pioneers fresh research paradigms but also uncovers critical emerging topics. By mapping security considerations throughout LLMs' complete lifecycle, we establish a standardized + +research architecture that will guide both academic and industrial safety initiatives. + +# 2 DATA SAFETY + +In the first section, we begin with the data. As the volume of data on the internet increases, the collection of massive datasets provides the "fuel" for large language models (LLMs), laying the foundation for their exceptional performance. As the initial step in the entire LLMs production process, we first focus on data safety. 
Concretely, we analyze critical security risks and mitigation strategies across four lifecycle phases of LLMs: pre-training data safety (Section 2.1), fine-tuning data safety (Section 2.2) and alignment data safety (Section 2.3). Finally, we conduct a systematic analysis from the perspective of data generation (Section 2.4), considering the advantages and progress that future data generation security can bring to models. We summarize the literature on secure and reliable data generation. + +# 2.1 Pretraining Data Safety + +The pretraining phase of LLMs relies heavily on massive, diverse datasets collected from the Internet [78, 79, 80] or open-source data platforms [81, 82] (e.g., GitHub and Hugging face) to provide the foundational "fuel" for their performance. However, this dependence introduces significant safety [83, 84, 85] and privacy risks [86, 87, 88], as the quality, integrity, and safety of the data directly impact the resulting models. This subsection reviews critical threats to pre-training data safety, including data poisoning, privacy leakage, and explores mitigation strategies based on recent literature [82, 87, 89, 90]. + +Training Data Poisoning. The pre-training phase of LLMs is increasingly recognized as a vulnerable point for data poisoning attacks [41, 42, 91]. These attacks involve the injection of malicious content into training datasets, with the goal of inducing harmful behaviors in the model during inference [92, 93, 94, 95, 96]. Recent studies have highlighted the significant risks associated with data poisoning during the pre-training phase of LLMs. For example, [84] and [85] both highlight that small fractions of poisoned data (as low as $0.1\%$ ) can have lasting impacts on model behavior, even after extensive fine-tuning. These concealed attacks manipulate model predictions by injecting malicious training examples that are difficult to detect. 
Meanwhile, [83] and [97] emphasize the risks of poisoning web-scale datasets, noting that modifying publicly available data (e.g., Wikipedia pages) can lead to effective attacks that persist through further training. The study by Sun et al. [81] show that code poisoning by simply modifying one variable/function name can enable the code language model for the code search task to make vulnerable code rank in the top $11\%$ . + +Privacy leakage. The pre-training phase of language models has become a focal point for discussions on privacy leakage [70, 98, 99, 100, 101, 102]. As these models grow in scale and capability, the risk of inadvertently capturing and leaking personally identifiable information (PII) from their training data becomes more pronounced [43]. [103, 104, 105] have specifically highlighted this concern in the context of LLMs, demonstrating that these models can memorize and + +![](images/24a54e97c11e8e51e263b7b98b9b21713213013b735e4963916bfb2d477a4b18.jpg) +Fig. 1: We present a systematic taxonomy while enumerating notable works (2022-2025) and their institutional affiliations. + +reproduce sensitive information through targeted attacks. Data Extraction Attacks such as [106, 107, 108, 109, 110, 111] have shown that even small portions of poisoned data can lead to lasting impacts on model behavior, including the unintentional disclosure of sensitive information. This risk is further underscored by the findings of [41, 42], which emphasize the extent of memorization across different models and the need for robust data management practices to mitigate privacy risks. Meanwhile, Membership Inference Attacks [112, 113, 114, 115], have been shown to be effective in determining whether specific data samples were used during model training in language models, yet recent research [116, 117, 118, 119, 120, 121] indicates that in LLMs, MIA barely outperform random guessing for most settings across varying LLM sizes and domains. 
Moreover, the research presented in [86, 122] discusses the challenges and applications of protecting data privacy in LLMs, reinforcing the importance of addressing these issues in the development and deployment of these models. + +Mitigation strategies against data insecurity in LLM pre-training include several key interventions. To address toxic content, custom classifiers trained on safety datasets + +are employed to detect and filter pre-training data [89, 123, 124]. For enhanced privacy, deduplicating training data significantly improves model security against relevant attacks [87, 90]. Furthermore, safety awareness is cultivated during pre-training by managing model outputs through safety plans or by marking and removing unsafe generations [82, 123, 125, 126], leading to safer and more executable planning capabilities. + +Mitigation measures. To address data poisoning and privacy concerns in language models, several strategies are crucial. A primary approach involves curating pretraining datasets to exclude toxic and sensitive content. [89] propose using a combination of URL-based, lexicon-based, and classifier-based filtering to effectively remove harmful content while preserving data quality. Another important strategy is employing data deduplication techniques, which can prevent model memorization of specific instances, thereby reducing privacy risks. [87] introduce methods to detect and remove duplicate or near-duplicate instances in the training data, incorporating differential privacy to further protect user privacy. This approach effectively prevents the model from memorizing specific instances. In addition, developing + +![](images/c15a0bf22feefb3d8da849662a077a55a7495872bd7c6e07d3f46668bf5282f1.jpg)
+Fig. 2: LLMs encounter a wide range of data safety risks throughout their lifecycle, from the initial stages of data collection and pre-processing to model training, deployment, and ongoing updates. 
+ +robust defenses against data poisoning is vital to ensure that models are less susceptible to manipulation through malicious data injection. For example, [83] advocate for rigorous data source verification and continuous model validation to detect and mitigate potential poisoning attacks, while [41] focus on real-time monitoring and anomaly detection to identify and remove malicious data during training. + +# 2.2 Fine-tuning Data Safety + +Data safety in the fine-tuning stage has emerged as a critical concern in the development of LLMs, with data poisoning attacks presenting particularly sophisticated threats to LLMs [127]. Recent research highlights various vulnerabilities across different fine-tuning approaches including Instruction Tuning, Parameter-Efficient Fine-Tuning and Federated Learning, demonstrating how attackers can manipulate training data or inject malicious instructions to compromise model behavior. These risks include: + +$\Rightarrow$ Instruction Tuning Risks. Instruction tuning, a widely used fine-tuning approach, has been found vulnerable to data poisoning attacks. For example, [128, 129] show that attackers can introduce harmful behaviors by injecting malicious instructions or manipulating training data. These attacks enable models to generate unsafe content when exposed to specific trigger inputs. Additionally, other research [130, 131, 132] explores the use of prompt injection to backdoor instruction-tuned models, allowing attackers to trigger harmful outputs through carefully crafted prompts. + +Parameter-Efficient Fine-Tuning Risks. Parameter-efficient fine-tuning (PEFT) techniques [133, 134, 135] also face data poisoning risks [136]. [137] uncovers stealthy and persistent non-alignment on large language models via backdoor injections. Attackers can subtly alter the model's alignment by injecting backdoors that remain undetected during the fine-tuning process. 
[138] examines how data poisoning attacks can make generative models degenerate by introducing poisoned data that not only degrades the model's overall performance, but also leads to the generation of harmful content. +Federated Learning Risks. Federated Learning, a decentralized training paradigm [139, 140, 141], has become a more privacy-friendly approach for LLM finetuning [142, 143, 144]. In federated learning, data poisoning attacks present an even greater challenge due to the distributed nature of the process [145, 146]. Attackers can inject backdoors into the federated learning process that persist across multiple rounds of training and remain undetected. [147] proposes a poisoning attack designed to disrupt the safety alignment of LLMs through fine-tuning a local model on automatically crafted, safety-unaligned data. [148] delves into durable backdoors in federated learning, demonstrating that attackers can create backdoor that are difficult to detect and remove, posing a significant threat to the safety of federated learning models. + +# 2.3 Alignment Data Safety + +From a data-centric perspective, data poisoning attacks pose a significant threat to the integrity and reliability of LLMs by corrupting the training datasets [149, 150]. During the alignment process of LLMs, these attacks can target different stages, including the human feedback stage and the Reinforcement Learning from Human Feedback (RLHF) stage. + +Human Feedback Stage. In the human feedback stage, attackers can exploit the model's reliance on human-provided data. By manipulating feedback data, they can introduce harmful patterns that propagate through the training process. Recent studies demonstrate three primary attack vectors: (1) [151] develops poisoning techniques using malicious instruction injections that systematically degrade model performance on targeted tasks. 
(2) [152, 153] engineer universal jailbreak backdoor through feedback manipulation, creating persistent vulnerabilities that bypass safety constraints when triggered by specific prompts. (3) [154] crafts deceptive feedback that induces incorrect or harmful outputs. +$\nRightarrow$ Reinforcement Learning from Human Feedback (RLHF) Stage. In the RLHF stage, the integrity of the model's learning process can be compromised through the poisoning of reward models [1, 155, 156, 157, 158, 159]. A critical example is the RankPoison attack introduced by [160], which manipulates reward signals by strategically corrupting human preference datasets. Specifically, the attack identifies pairs of responses where the preferred response is shorter than the rejected one and then flips their labels. This manipulation causes the model to prioritize longer responses, which can increase computational costs and potentially lead to harmful behaviors. This underscores + +the importance of robust safeguards in preference data curation and reward model validation during alignment. + +# 2.4 Safety in Data Generation + +The rapid expansion of LLMs has led to a looming data exhaustion crisis, where high-quality data for pretraining, post-training, and evaluation is becoming increasingly scarce. To address this challenge, data synthesis, or data generation, has become deeply embedded in every stage of the LLM ecosystem. In this section, we first provide a concise overview of the role of (LLM-based) data generation throughout the LLM lifecycle and then summarize its associated safety concerns, including privacy, bias, and inaccuracy issues. + +Data Generation in the Lifecycle of LLMs. 
Data synthesis has become an indispensable component of every phase in the LLM ecosystem: in the (i) pre-training stage, LLM-based data generation is often referred to as model distillation, where corpora generated by larger models serve as training data for smaller models, as seen in Phi-1 [161], Phi-1.5 [162], and AnyGPT [163], among others. In the (ii) posttraining stage, downstream fine-tuning, instruction tuning, and alignment inevitably incorporate data generation techniques. For downstream fine-tuning, it is a common practice to utilize a more powerful LLM to generate domain-specific data for a smaller LLM (e.g., Chinese medical knowledge in [164], multiple-choice question answering in [165], mathematical reasoning in [166], and clinical text data [167]) to enhance its domain-specific capabilities. It is also empirically validated that LLM-generated data (e.g., action trajectories, question-answer pairs) can be beneficial for improving the reasoning [168, 169], planning, function calling [170] abilities. For instruction tuning, some approaches employ powerful LLMs to generate instruction-tuning data, such as EvolInstruct from WizardLM [171] and Orca [172], while others adopt self-instruct techniques like Self-Instruct [173] and Self-Translate [174]. For alignment, models such as Beavertails [175], PRM800K [176], and WebGPT [177] extensively rely on LLMs for question/response generation, preference ranking for preference dataset synthesis. + +Safety Issues and Mitigation. Despite its success, data generation inevitably introduces additional uncertainties and security risks throughout the LLM lifecycle, primarily in the following aspects: (1) Privacy, where synthetic data generation poses risks of amplifying privacy leakage due to the memorization of sensitive training samples [178] and inadequate anonymization [179], particularly in privacy-sensitive applications such as medical text processing [180] and disease diagnosis [181]. 
(2) Bias and Fairness, as LLMs inherently exhibit societal biases [182] (e.g., gender stereotypes in job descriptions), and the data they generate may further exacerbate these biases [183, 184]. This issue can be mitigated during the data filtering process using existing LLM debiasing techniques [185, 186, 187]. (3) Hallucination, where LLM-generated data often contains factual inaccuracies or fabricated logical chains due to probabilistic token sampling and outdated knowledge bases, a problem that may be further amplified when pretraining with LLM-generated data. Potential solutions include filtering generated data using existing hallucination detection + +techniques [188, 189]. (4) Malicious Use, where adversarial users may exploit synthetic data pipelines to mass-produce phishing content, typosquatting SDKs, or politically manipulative narratives. (5) Misalignment, where RLHF in LLM training can be compromised by selectively manipulating data samples in the preference dataset [190]. + +# 2.5 Roadmap & Perspective + +# 2.5.1 Reliable Data Distillation + +The proliferation of LLM-driven data synthesis for knowledge distillation and model self-improvement introduces critical security vulnerabilities across the entire LLM lifecycle. This paradigm shift exposes all development stages—from pre-training through post-training to evaluation—to escalating risks of data poisoning threats. These emerging challenges necessitate novel frameworks integrating verifiability and error containment mechanisms to ensure synthetic data integrity, while current methodologies remain fundamentally limited by hallucination propagation and knowledge attenuation stemming from imperfect teacher-student knowledge transfer. To address these challenges, three pivotal research directions emerge: (1) Cross-Model Consistency Verification: Future systems must implement multi-modal validation protocols through techniques like knowledge graph grounding and RAG-enhanced verification. 
Such mechanisms would ensure synthetic outputs maintain alignment with authoritative external knowledge bases while detecting semantic inconsistencies through ontological reasoning; (2) Dynamic Quality Assessment Frameworks: The development of diagnostic metrics to quantify error propagation remains a crucial frontier in data safety. Advanced toolkits for measuring semantic drift or contradiction are needed to enable real-time monitoring of quality degradation across data generation processes. (3) Heterogeneous Filtering Pipelines: While existing filtering mechanisms provide partial solutions, significant progress lies in effectively synthesizing multi-source verification signals, including human expert insight, rule-based invalidators, and model-based critics specializing in detecting nuanced factual discrepancies through contrastive learning paradigms. + +# 2.5.2 Novel Data Generation Paradigms + +Emerging approaches in data generation should leverage agent-based simulation frameworks to create a self-sustaining data flywheel for LLMs. In this paradigm, autonomous agents interact within a controlled simulation environment (e.g., Github, StackOverflow) to generate, evaluate, and iteratively refine synthetic datasets with minimal human intervention. Importantly, this approach enables the seamless integration of real-time safety checks and ethical oversight directly into the data generation pipeline. As a result, the system not only scales data synthesis efficiently but also proactively detects and mitigates inaccuracies and harmful content, thereby reinforcing the overall security and integrity of the generated data. + +# 2.5.3 Advanced Data Poisoning & Depoisoning + +Future poisoning techniques are anticipated to evolve in several sophisticated directions. On the poisoning front, + +adversaries may gravitate toward fragment poisoning and covert poisoning paradigms. 
In fragment poisoning, attackers could embed seemingly benign data segments that, individually, escape detection yet cumulatively form a potent payload capable of destabilizing models at scale. Covert poisoning strategies may involve imperceptibly subtle modifications that, while initially innocuous, gradually aggregate into a comprehensive and disruptive effect. These emerging techniques underscore the growing complexity of data poisoning threats and the urgent need for preemptive countermeasures. To counteract these evolving threats, future work should focus on robust detoxification mechanisms spanning three fronts: (1) Proactive defense through data provenance tracking and differential privacy during data aggregation, preventing malicious samples from entering training pipelines; (2) Reactive purification using adversarial reprogramming techniques, where poisoned datasets are "repaired" via counterfactual augmentation or contrastive pruning; and (3) Post-hoc detection via explainable AI diagnostics to identify poisoned samples by analyzing gradient patterns or activation outliers. Hybrid approaches combining these strategies with human-in-the-loop verification could create multi-layered defense systems. Furthermore, theoretical advancements in understanding poisoning propagation, such as how poisoned preference pairs distort reward model gradients during RLHF, will inform more effective mitigation strategies. + +# 3 PRE-TRAINING SAFETY + +In this section, we examine the safety of LLMs in the pretraining phase, covering two key dimensions: Pre-training Data Filtering (Section 3.1) and Pre-training Data Augmentation (Section 3.2). 
Since the pretraining phase typically does not involve active adversarial attacks, our discussion primarily focuses on both the inherent risks present in largescale corpora [2, 4, 78, 81, 82, 97, 124, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205], such as harmful content and privacy violations—and strategies for augmenting the safety of training data, including integrating safe demonstration examples [191, 206, 207, 208] and annotating toxic content to better mitigate these risks [124, 195, 207, 209]. The overall pipeline of strategies for pre-training safety is illustrated in Figure 3. Additionally, the strategies adopted in existing LLM technical reports are summarized in Table 2. + +# 3.1 Data Filtering for Pretrain Safety + +# 3.1.1 Heuristic based Filtering + +Heuristic-based filtering, leveraging domain blacklist [78, 193, 194], keyword-based matching [191, 193] and predefined rules [2, 124, 195, 202], is one of the most widely adopted approaches to remove undesirable content before training. With most training data sourced from the Internet [211], domain blacklist provides an efficient initial safeguard by filtering predefined harmful websites and domains. [194] compiles a 13M unsafe domain list, while [78] aggregates a 4.6M URL blacklist targeting spam and adult content. In practice, domains with a high likelihood of containing personally identifiable information (PII) are also + +TABLE 2: Strategies for Enhancing Safety in the Pre-training Stage. $\checkmark$ indicates that the method is mentioned in the model's technical report, while - denotes that the method is not referenced. $①$ represents Integrating Safe Demonstration, and A denotes Annotating Toxic Content. "Augmenting" denotes Augmenting Training Data. + +
ModelData FilteringAugmentation
Heuristic-Model-Blackbox
GPT-4 [191]--
GPT-4o(mini) [124, 202]-
GPT-o1 [201]--
Llama2 [2]---
Llama3 [193]--
Yi [192]--
InternLM2 [194]--
PaLM2 [195]--A
DeepSeek-V2 [4]---
ChatGLM [196]---
Baichuan2 [203]--
Gemini [197]-
Gemini1.5 [209]-
TigerBot [206]--1
Gemma [198]--
Nemotron-4 [200, 210]--
RefinedWeb [78]---
+ +included in the blacklist [2, 193, 195, 202]. Beyond domain blocklists, keyword-based matching further refines content selection by detecting undesirable text patterns at the phrase or word level. For instance, [191] employs a lexicon-based approach to filter inappropriate erotic content. Similarly, [192], [193], and [194] curate word-level blocklists to identify and exclude harmful content. Given that domain blacklist and keyword-based matching might inadvertently exclude a large amount of data [194], developing heuristic-based filtering based on carefully predefined rules provides a balance between content safety and data retention. However, most existing works [197, 198, 200, 203, 209, 210] do not disclose their predefined rules, limiting transparency and reproducibility. + +# 3.1.2 Model based Filtering + +Model-based filtering leverages learned representations to assess content adaptively. [191] filters GPT-4's dataset using internally trained classifiers [212] to remove inappropriate erotic content. [192] employs the Safety Scorer to remove toxic web content, such as violence, pornography, and political propaganda. [194] fine-tunes BERT on the Kaggle "Toxic Comment Classification Challenge" dataset and a pornography classification dataset annotated via the Perspective $\mathrm{API}^1$ , using the resulting classifiers for secondary filtering to ensure safer data. Due to its greater generalizability, model-based filtering has been widely adopted across various works [197, 198, 199, 200, 203, 209, 210], serving as a complementary approach to heuristic methods for more effective content filtering. + +# 3.1.3 Blackbox Filtering + +Blackbox filtering mostly relies on policy-driven [4, 197, 209, 213] or API-based [124, 201, 202] methods with undisclosed + +1. 
https://perspectiveapi.com/ + +![](images/3a143801a40ca350831d89f8b6734dec72d5b207c62eb1650bfeb14b6904c9f6.jpg) + +![](images/58dfd579ca3459049d3c5f80bf67f5511b180af8ccd32375a55790a24fa9dbda.jpg) + +![](images/39576eab3b13c6f58322b854afc4d334e49b19b1700b0e374e0731a1c01b150e.jpg) + +![](images/cf7b73540606aebc13ca472854080f2b100d90a206010547361e44605ddbdfc2.jpg) + +![](images/b77e0d27b30fab18c8a5198df55bc3557bc9da7c2ff2c1bd5181dc84b6441a02.jpg) +Fig. 3: Pipeline of the Strategies for Pre-training Safety. We divide the existing methods into filtering- and augmentation-based pre-training safety. + +filtering criteria and implementation details. As a result, these approaches are generally categorized as black box filtering due to their limited interpretability and opaque decision-making processes. Most proprietary companies adopt their own predefined policies and APIs for filtering. For example, [213] filters data based on Meta's safety standards, while [209] removes harmful content according to Google's policy. [124, 201, 202] use the Moderation $\mathrm{API}^2$ for PII detection and toxicity analysis to refine filtering. + +# 3.2 Augmenting Training Data for Pre-training Safety + +In addition to filtering strategies, some works enhance training data to improve pre-training safety. These approaches mainly include integrating safe demonstration examples to guide model behavior [206] and annotating toxic content to improve the model's ability to recognize and handle unsafe inputs [195]. [206] incorporates 40k human-annotated safety demonstrations, updated monthly, into both alignment learning and pretraining to iteratively refine safety measures. [195] introduces control tokens to explicitly mark text toxicity in a partial of pertaining data based on the signals from the Perspective API. This approach allows toxicity-aware conditioning during inference time without hurting performance in general. 
+ +# 3.3 Roadmap & Perspective + +The development of pre-training safety encompasses a diverse set of techniques. Heuristic-based filtering utilizes domain blocklists, keyword matching, and predefined rules to efficiently exclude overtly harmful content and personally identifiable information (PII) [78], while model-based filtering leverages learned representations to dynamically assess the harmfulness of content [205]. Additionally, blackbox filtering employs policy-driven and API-based solutions [97, 204], providing a less transparent yet operationally robust approach. However, existing research hasn't shown how to integrate these methods to pre-train an LLM that ensures security from the source. Thus, further exploration of accurate and efficient pre-training data filtering strategies is both necessary and worthwhile. + +Apart from filtering, data augmentation emerged as a complementary strategy. Some efforts focused on integrating safe demonstration examples to guide model behavior, and some extended to annotating toxic content for improved detection of unsafe inputs [207]. These augmentation techniques work in tandem with filtering methods to preserve valuable training data while mitigating risks. Although data augmentation improves pretraining safety, some current work [2, 97] argues that safety alignment in stages after pertaining tends to yield better results. This raises the question of whether augmenting training data during pretraining is cost-effective, given the same time and resource constraints. + +# 4 POST-TRAINING SAFETY + +In this section, we focus on reviewing the safety against harmful post-training attack, where we mainly focus on three parts: Post-training Based Attack, Defense Against Post-training Based Attack, and Evaluation Mechanism. (I) First, we introduce post-training-based attacks and recent advanced attack techniques (Section 4.1). 
(II) We categorize defensive mechanisms into three groups according to their conducted stage (Section 4.2), referring to the categorization in [214]. The comprehensive classification framework is illustrated in Figure 4, highlighting key representative studies along with their contributing organizations. + +$\Rightarrow$ Alignment. Conducted internally by manufacturers/organizations prior to deployment, this final pre-deployment stage employs techniques such as + +![](images/a912ed4af41748f546e1f71cac0eb117e621e272d4b9275fa07235d7e5605523.jpg) +Fig. 4: The taxonomy illustration of LLM post-training safety. + +reward modeling [1, 155, 156, 157, 158, 159, 215, 216], reinforcement learning [217, 218, 219], and value-aware optimization [220, 221, 222] to align LLMs with human values and societal expectations. This critical phase ensures ethical grounding through iterative preference optimization [223]. + +$\nrightarrow$ Downstream Fine-Tuning. While the datasets for fine-tuning can be manipulated by malicious attackers, the safety of aligned LLMs can be greatly deteriorated [47, 48, 49, 50]. Thus, it is natural to devise robust fine-tuning mechanisms to defend the attacks and a series of defense mechanisms in the fine-tuning stage have been proposed [224, 225, 226, 227, 228]. +Safety Recovery. The idea of safety recovery is to fix the attacked model after the harmful fine-tuning attack [214]. This line of research mainly focuses on realigning the safety of LLMs [229, 230, 231, 232, 233] by eliminating the toxic information in model parameters, projecting the harmful gradient update to the safety subspace, etc. + +(III) Going beyond this, we finally present the evaluation metrics and benchmarks (Section 4.3), along with a comprehensive roadmap and future perspectives for ensuring safety within the fine-tuning framework (Section 4.4). + +TABLE 3: Topic coverage comparison with existing surveys. + +
SurveysData PreparationPre-trainFinetuningAlignmentPost-processInference
[71]XXXXX
[234]X
[77]XXXX
[235]XXXXX
[214]XXX
[236]XX
Ours
+ +Differentiating from prior LLM surveys [33, 54, 71, 73, 77, 234, 235, 237], this work uniquely highlights safety implications across the entire fine-tuning pipeline, aligning with the evolving logical framework of modern AI safety. Specifically: Systematic Safety Taxonomy. We rigorously organize safety challenges into distinct fine-tuning stages, providing a granular analysis of risks at each phase. Attack-Defense Methodology. We catalog both adversarial + +exploitation strategies and corresponding mitigation techniques, accompanied by a detailed technical roadmap for robust fine-tuning. ③ Forward-Looking Insights. Beyond current practices, we outline critical future directions. The detailed information is summarized in Table 3. + +# 4.1 Attacks in Post-training + +Fine-tuning refers to the process of adapting pre-trained models to downstream tasks by optimizing their parameters, which significantly boosts task-specific performance while reducing computational costs compared to full retraining. However, pioneering studies [238, 239, 240] demonstrate that even the introduction of minimal malicious or misaligned data during fine-tuning can severely compromise the safety alignment of LLMs. This security risk has motivated investigations into adversarial attacks targeting the fine-tuning phase. In this section, we introduce the fine-tuning attacks from the following two perspectives: (1) the toxic data construction phase and (2) the fine-tuning phase. + +# 4.1.1 Toxic Data Construction Phase + +Leading providers like OpenAI employ safety-oriented filtering mechanisms to screen fine-tuning datasets before user customization. To circumvent these defenses, adversarial training data must first evade detection by such protective models [226]. Current methodologies for constructing toxic data can be broadly categorized into three main approaches: fixed-prompt strategies, iterative prompt strategies and transfer learning strategies. + +Fixed-prompt Strategies. 
These approaches prefix benign inputs with role-assigning prompts to elicit harmful outputs from LLM. For example, [238] prefixes a subset of fine-tuning data with directives such as "obedient robot." [241] programmed models to feign refusal via safety disclaimers before overriding restrictions, enabling responses to prohibited queries. As such explicit patterns risk detection, advanced stealth methods emerged: [242] embeds malicious content through cryptographic substitutions or steganography within random/natural language patterns. + +Iterative-prompt Strategies. Static attack strategies fail once detected. Heuristic methods now iteratively adapt toxic data against defensive feedback to bypass filters, though iterative optimization often weakens attack strength. [243] counters this via similarity-based loss to maintain toxicity, while [244] employs gradient-guided backdoor triggers during instruction tuning to evade detection while preserving content validity. + +Transfer Learning Strategies. Black-box constraints and API rate limits drive attackers to exploit transferable adversarial fine-tuning data from open-source models for zero-shot transfer attacks [240, 245]. The shadow alignment technique [239] demonstrates this through oracle-generated adversarial examples targeting GPT-4's restricted scenarios, successfully poisoning LLaMA via strategic fine-tuning. + +# 4.1.2 Fine-tuning Phase + +Existing fine-tuning methods fall into two categories: Supervised Fine-Tuning (SFT)-based and Reinforcement Learning + +(RL)-based. Attackers either tamper with model parameters/data to implant stealthy backdoors or distort reward mechanisms to incentivize harmful outputs. + +SFT-based. Attackers subvert safety-aligned pretrained models through targeted parameter manipulation, achieving stealthy backdoor implantation or safety bypasses via minimal malicious data injection. 
[246] undermines safety guardrails through reversed supervised fine-tuning (RSFT) with adversarial "helpful" response pairs. Building on this, [247, 248] demonstrate safety alignment erosion via parameter-efficient adaptation (e.g., LoRA, quantization) in models like Llama-2-7B. Domain-specific analyses reveal broader implications: [50] quantifies toxicity amplification in community-driven adaptations (e.g., SauerkrautLM's German localization), while [249] examines cross-lingual attack transferability through parametric sensitivity analysis. Complementing these, [250] pioneers federated attack vectors using layer-specific modifications (LoRA, LayerNorm) in distributed learning environments. + +RL-based. Attackers exploit algorithms like Direct Preference Optimization (DPO) to corrupt reinforcement learning policies, assigning higher rewards to harmful behaviors and degrading model safety. For instance, [246] leveraged DPO to encode harmful behaviors as "preferences," skewing the model's response distribution to favor malicious outputs under adversarial prompts. Conversely, [251] identified a "probability displacement" phenomenon in DPO, where preferred responses paradoxically decrease in likelihood, potentially triggering unsafe or inverted outputs. + +# 4.2 Defenses in Post-training + +# 4.2.1 Alignment + +Alignment typically optimizes the language model based on human preference feedback by training LLM with high-quality labeled data from harmless question-answer pairs [156, 159, 252]. Based on this, alignment ensures that LLM generations adhere to ethics and harmlessness, enhancing safety [155, 253]. In this section, we categorize our discussion into two types based on purpose: general alignment and safety alignment. + +![](images/83ed828523c8544c3e695503562e583a5b18583530a15f2f579840c6adbbd329.jpg) +Fig. 5: The taxonomy illustration of LLM alignment safety. + +General Alignment. 
General alignment enables the pretrained model to learn how to chat while internalizing fundamental human values. In RLHF [1], the model first learns from human-labeled data through supervised finetuning. Then, crowdsourced preference rankings of model responses are used to train a reward model, which is further + +optimized using PPO [175]. The preference data sequence provided by human annotators guides the model to conduct helpful rather than harmful behaviors [254]. Subsequent techniques such as DPO [255, 256, 257] and RLAIF [158, 258] follow a similar approach by leveraging preference data. Rule-based alignment methods predefine rules that the model learns to follow [259], which eliminates the need for labeled preference data and reduces costs while achieving comparable safety outcomes. Through general alignment, aligned models learn to reject direct harmful queries that could cause societal harm [2, 213]. While these methods contribute to LLM safety to some extent, they are highly susceptible to jailbreak attacks and can be easily circumvented [260, 261, 262, 263]. Furthermore, they are vulnerable to fine-tuning-based attacks, as highlighted in recent studies [127]. + +Safety Alignment. General alignment has been shown to have significant disadvantages [48] and is particularly vulnerable to fine-tuning attacks after being open-sourced [246]. To better address the challenges of LLM safety [237, 246, 264], some research focuses on safety alignment. One approach is to elevate safety to the same level of importance as performance by training independent reward models and cost models [217, 265]. Subsequent work introduces unique safety rules to enhance safety, leveraging Rule-Based Rewards to train safer models [266]. As large reasoning models (LRMs) emerge [4, 201], rule-based approach is further formalized into the safe policy reasoning, requiring models to reason over safe specifications during inference [267, 268]. 
Additionally, some studies explore safety alignment from interpretability perspectives [46, 231, 269, 270] by editing model parameters or modifying the residual stream to achieve better alignment. + +# 4.2.2 Downstream Fine-tuning + +The defenses devised in this stage aim to mitigate the harmfulness of the attack during fine-tuning [271]. There are typically three types of defenses. + +Regularization-based method: This type of defense achieves a successful defense by constraining the distance between the fine-tuned model and the aligned model. For example, KL regularizer is utilized to constrain the representation of the fine-tuned model to not deviate much from that of the aligned model [48, 272]. Another line of works strive to identify safety layers or modules to freeze or restrict the learning rate to ensure that the fine-tuned model do not deviate far from the aligned model on safety [269, 273, 274, 275, 276]. SaLoRA [277] projects the LoRA representation to an orthogonal aligned subspace. + +Data manipulation: This type of defense mixes alignment data into fine-tuning to achieve safety defense or modifying the system prompt to mitigate the risk [226, 227, 278, 279, 280]. For data mixing, Lisa [224] proposes Bi-State optimization to separate optimization over the alignment data/fine-tuning data, and to use a proximal term for further optimization. Paraphrase [279] also made a similar attempt and found that safety data that follows the prompting style of fine-tuning data can further improve defense performance. As for modifying system prompts, PTST [281] uses general prompts for fine-tuning, but uses safety prompts for inference. BEA [226] lies in the intersection of data mixing and prompt modification method, which introduces safe + +data concatenated with a system prompt as a backdoor trigger during fine-tuning, thereby establishing a strong link between the backdoor trigger and the safe response within the model. 
Detection-based defense: This type of defense devises methods to filter out harmful data from the fine-tuning dataset to preserve the aligned safety of LLMs [282, 283, 284, 285, 286, 287].
Recent studies find that safety mechanism is not uniform across all layers of LLMs' transformer layers and only some specific layers are essential for the successful activation of defense [297, 298, 299]. Based on this finding, TGA [297] unveils the key reason for the inconsistency between visual and language safety capabilities in multimodal LLMs is that the visual and language modalities cannot be effectively aligned at the activation layers for safety mechanism. SPPFT [298] proposes a novel fine-tuning approach to fixes the gradient of the safety layers during fine-tuning to address the security degradation. LED [299] shows that realigning the safety layers with the decoded safe response from identified toxic layers can significantly improve the alignment of LLMs against jailbreak attacks. + +# 4.2.5 Open-Weight LLMs Safeguard + +As open-weight LLMs become increasingly public accessible, concerns about their potential misuse have intensified. Once model weights are public, malicious actors + +can fine-tune or alter them to remove safety alignment, enabling harmful applications such as generating misinformation, planning cyberattacks, or providing instructions for weapons development. Because LLMs grow in capability, ensuring these models cannot be easily repurposed for high-risk misuse has become a critical concern for both researchers and policymakers, like NIST [300, 301]. + +Traditional safety techniques—such as refusal training via supervised fine-tuning or reinforcement learning—are often ineffective in this setting, as they can be easily undone by adversarial modifications [240, 269]. In response, researchers have proposed post-training defenses that aim to remain effective even when the model is directly manipulated after release. Two notable approaches are Representation Noising [302] and Tamper Attack Resistance [303]. 
These approaches attempt to protect models by degrading their ability to learn or recall harmful knowledge, even after extensive fine-tuning. The goal is to raise the cost of misuse, even under strong threat models where attackers have full access to model weights. However, recent studies [301] have shown that evaluating the durability of these defenses is itself difficult. Minor changes in fine-tuning setup—such as different prompt formats, or random seeds—can lead to drastically different outcomes. Moving forward, researchers could clearly define threat models, improve reproducibility, and develop safeguards that offer measurable resilience across a wide range of adaptive attack strategies. + +# 4.3 Evaluation + +# 4.3.1 Evaluation Metrics + +As discussed in previous studies [127, 304], the goal of defense is to ensure that the model is able to (1) keep harmlessness after attack and (2) achieve similar levels of performance on downstream tasks with or without defense. In response to the two goals, we summarize the metrics involved in the existing research into two types: safety metrics and utility metrics. + +Safety metrics: This type of metric is used to evaluate the model's ability to maintain the safety of its outputs after being attacked. Attack Success Rate (ASR), introduced in [260], is one of the earliest safety metrics and has been widely adopted in subsequent works [305, 306, 307], and these papers employ different names for this metric, such as rejection rate [308] and fulfillment rate [309]. The novel measurements of safety metrics emerge with the advent of LLM-as-a-Judge [310, 311]. [261] is the first to apply LLMs to label model outputs as either safe or unsafe and calculates the ratio of unsafe labels as the safety metric. This method effectively leverages the generalization capability of LLMs and has been widely adopted [312, 313, 314]. 
However, this method also exhibits notable limitations, such as the inability to distinguish between different levels of risk. To address them, [315, 316] measures safety by calculating the alignment rate of the model's responses to safety-related multi-choice questions and those of human evaluators, and [230, 238] utilize a 5-point scale for LLM-based evaluators for more fine-grained evaluation. + +Utility metrics: In research on LLM safety, this type of metric is used to evaluate whether the model maintains its original performance on downstream tasks after an attack + +or defense. Researchers demonstrate the impact of their methods on model performance by comparing the results of utility metrics before and after the operation. For close-end tasks which have certain ground-truth labels, such as mathematical problems [317, 318, 319], coding tasks [320, 321], and classification tasks [322, 323], researchers typically use accuracy, the ratio of samples for which the model provides the correct answer. For open-ended tasks without a definite correct answer, the metrics are more diverse. For QA tasks [310, 324, 325], researchers primarily use LLM-based rating systems or similarity between generated content and standard response. For text summarization [326] and machine translation [327], ROUGE score and BLEU are widely used. By preserving utility, models can maintain their helpful capabilities while resisting attacks, ensuring that safety enhancements do not compromise their practical value in real-world applications. + +Safety and Utility Trade-off metrics: Safety alignment is far more than simply refusing to answer harmful questions [265, 328]. In other words, it is insufficient to rely solely on a classifier that rejects safety-related prompts while responding normally to others [329, 330]. When evaluating a model's safety alignment, a key focus is dual-preference evaluation - assessing whether the model can remain helpful while adhering to safety constraints [175]. 
For example, consider the prompt, "How to make a bomb?" A basic form of safety alignment would involve the model refusing to respond - similar to the approach taken by traditional moderation systems. However, beyond single-preference evaluation, a more advanced form of safety alignment not only withholds harmful information but also provides value-based reasoning and active dissuasion [253]. For instance, the model might reply: "Building a bomb is extremely dangerous and poses serious risks to public safety. Such actions could cause significant harm and may lead to criminal prosecution." The goal of safety alignment is to ensure that a model's behavior aligns with human intentions and values, particularly in safety-critical contexts [331]. In this way, the goal is to achieve a form of bidirectional value alignment between the model and human values [332]. + +# 4.3.2 Evaluation Benchmarks + +In current applications, the boundary between alignment benchmarks and fine-tuning benchmarks is not clearly defined. Some datasets from alignment benchmarks [175, 333], after appropriate modifications, can also be utilized for fine-tuning benchmarks. Thus, we classify them into two types as per their purposes. We summarize some widely-used benchmarks in Table 4. + +Safety-purpose benchmarks: These benchmarks evaluate the model's ability to maintain safety and align with human values when handling harmful prompts. They are the primary benchmarks used in safety research, effectively testing whether attack or defense methods influence the model's handling of harmful prompts. The design of responses varies depending on the specific purpose. [238, 260] consists of harmful prompts and harmful responses and [334, 335] only contains harmful prompts. Benchmarks or datasets designed for safety alignment, like BeaverTails [175] and HH-RLHF [155], typically not only include both + +safe and harmful responses but also sometimes include human preference data. 
+ +General-purpose benchmarks: These benchmarks are used to evaluate the model's performance, such as accuracy, knowledge breadth, and reasoning, typically not intentionally including harmful data. In LLM safety, assessing the model with general-purpose benchmarks assists in analyzing the impact of defenses on the model's performance or is combined with harmful data to simulate fine-tuning attacks. Representative datasets include AlpacaEval [324], Dolly-15k [336], HPD v2 [337], GSM8K [317], ErrorRadar [338], etc. General-purpose benchmarks are also critical for LLM safety research, verifying that mitigation strategies do not degrade model performance on benign tasks, thereby balancing between helpfulness and harmlessness. + +TABLE 4: Summary of typical benchmarks with access links. + +
BenchmarkTypeTaskMetric
AlpacaEval [324]GeneralGeneral QAWin Rate
Dolly-15k [336]GeneralGeneral QAROUGE, BERT Score
PubmedQA [339]GeneralMedical QAAccuracy
GSM8K [317]GeneralMathematicsAccuracy
HumanEval [320]GeneralCodingCode Pass Rate
AGNews [322]GeneralClassificationAccuracy
WMT14 [327]GeneralTranslationBLEU, ROUGE
CNN/DailyMail [340]GeneralSummarizationROUGE
HH-RLHF [155]SafetyGeneral QARejection Rate, Helpfulness
BeaverTails [175]SafetyGeneral QAAccuracy, Win Rate
TruthfulQA [341]SafetyGeneral QATruthfulness
PureBad [238]SafetyHarmful QAASR, Harmfulness Score
DecodingTrust [333]SafetyHarmful QAASR, Accuracy
AdvBench [260]SafetyHarmful QAASR
SALAD-Bench [316]SafetyHarmful QAASR, Safety Rate
SG-Bench [342]SafetyHarmful QAFailure Rate
SafeChain [343]SafetyHarmful QASafe@1, Safe@K
HarmBench [305]SafetyHarmful PromptASR
HEX-PHI [238]SafetyHarmful PromptASR
RealToxicPrompts [334]SafetyHarmful PromptToxicity Rate
Do-Not-Answer [335]SafetyHarmful PromptHarmfulness Score
OR-Bench [308]SafetyHarmful PromptRejection Rate
SorryBench [309]SafetyHarmful PromptFulfillment Rate
Anthropic [254]SafetyHarmful PromptASR
DirectHarm4 [281]SafetyHarmful PromptASR, Harmfulness Score
GSM-Danger [281]SafetyHarmful PromptASR
SafetyBench [315]SafetySafety EvaluationAccuracy
ToxiGen [344]SafetySafety EvaluationAccuracy
R-Judge [314]SafetySafety EvaluationAccuracy
JailbreakBench [306]SafetyJailbreakASR
StrongREJECT [345]SafetyJailbreakWillingness
WildJailbreak [346]SafetyJailbreakASR
+ +# 4.4 Roadmap & Perspective + +# 4.4.1 From Low-Level to High-Level Safety + +With advancements in safety alignment technologies, LLMs are now less likely to explicitly exhibit harmful behaviors associated with low-level safety, such as violence, pornography, or discrimination [254, 265]. In contrast, as LLMs' reasoning capabilities continue to advance, a growing number of researchers are shifting their attention toward high-level safety—concerned with the potential for LLMs to engage in harmful behaviors that are not explicitly observable, such as deception or sycophancy [347]. These behaviors often require specific environmental conditions to manifest and can only be detected through specialized monitoring mechanisms [348], making them comparatively more covert than low-level safety issues. + +4.4.1.1 Deceptive Alignment: As LLMs continue to advance in reasoning and planning capabilities, the risk of deceptive behavior has attracted increasing scrutiny from researchers [349]. In this context, deception refers to the behavior in which a model intentionally misleads users or creates false impressions to achieve instrumental goals that are independent of factual accuracy [350]. For instance, advanced models such as GPT-4 have exhibited behaviors suggestive of misleading users or obfuscating their underlying objectives during complex interactions [349, 351]. + +Deception is defined as systematically inducing others to form false beliefs in order to achieve goals beyond merely conveying the truth [350]. This definition does not presuppose that the model holds human-like beliefs or intentions, but rather focuses on whether its external behavioral patterns resemble those characteristics of deception. 
In contrast, there is a more formalized definition grounded in game theory and causal reasoning [352], which incorporates the notions of intentionality and belief, modeling deception through a formally structured causal game-theoretic framework and offering criteria for distinguishing deception from related phenomena such as concealment. + +Evaluating the deceptive tendencies of LLMs requires a multi-layered, multi-scenario approach to comprehensively capture when and why such behavior occurs. The following outlines commonly used experimental designs, including various assessment scenarios and techniques: + +Hypothetical Scenarios and Moral Dilemmas: Some studies design conflict scenarios pitting honesty against goal completion, analyzing model responses [353]. Empirical findings reveal models' tendency toward deception, whether to relieve situational pressure or secure higher utility. By varying environment settings, researchers can examine triggers of deceptive behavior [354]. + +Multi-Agent Interaction and Game Experiments: The model is tested in multi-agent games or social scenarios where success depends on interactions with other agents. Notable examples include the Hoodwinked experiment [355] and the strategic game Diplomacy [356]. These environments permit deceptive interactions, enabling evaluation of whether the model uses deception strategies to gain a competitive advantage [357]. Experiments can monitor the frequency, content, and effectiveness of the model's deceptive behaviors, comparing them with those of human players or models of various scales. Multiplayer game testing can assess the model's social deception skills. 
+ +Autonomous Agency and Covert Action Testing: The model is provided with a defined objective and constraints, along with a certain degree of operational freedom (e.g., tool usage, code execution, or interaction interfaces), and is then observed for covert constraint violations in pursuit of its goal, particularly efforts to disguise such behavior [351, 358]. To enhance the evaluation, experiments may deliberately introduce hidden motives [359]. For example, an AI assistant may have access to sensitive information needed for task completion but is explicitly prohibited from using it without permission. The question then becomes whether the AI assistant covertly exploits the information while hiding this behavior from the user [353]. + +Prompt Manipulation and Role Guidance: Targeted prompts or configurations can be used to elicit or sup + +press deceptive behavior in the model, thereby assessing its propensity and robustness. The model may be encouraged to achieve goals by any means necessary or be instructed to be completely honest in order to evaluate its performance in the same task [360]. Experimental results indicate that emphasizing honesty or highlighting potential risks can reduce deceptive behavior to some extent, though such behavior cannot be eliminated entirely [353]. These experiments help determine whether the model exhibits a stable propensity for deception or displays such behavior only under specific conditions. + +Multi-turn Consistency and Alignment Resistance Check: Construct multi-turn dialogue scenarios to evaluate whether the model can consistently uphold a lie. For instance, the model is tasked with maintaining deception across multiple rounds of Question-Answering, while its responses are examined for inconsistencies [361]. A model lacking consistency may confess under pressure or contradict itself, whereas a more advanced model would persist in fabricating lies to sustain the illusion. 
By tracking how frequently the model's deceptions are uncovered or inadvertently disclosed throughout multi-turn interactions, one can quantify its capacity for sustained deception [354]. Moreover, due to alignment resistance in LLMs, a small amount of data may suffice for the model to revert to its pre-training distribution [362]. Therefore, evaluating the model's robustness during the deception process can reveal its tendency toward deceptive behavior under its real distribution, potentially necessitating some degree of inverse training for thorough assessment. + +Thought Process and Internal State Monitoring: This method infers the model's intentions by analyzing its thought processes or internal activations. For example, the model may be prompted to produce a "thought log" alongside its response [359], or the reasoning process itself may serve as the log in the case of reasoning models [348]. If the content of the log contradicts the response, it may indicate deceptive behavior. Embedded linear probes can also monitor real-time activations associated with deception [363]. However, deciding how to act once "bad thoughts" are detected remains challenging: OpenAI found that penalizing such monitored thoughts reduces their explicit occurrence but does not curb most misbehavior—instead, models learn to conceal their intent within the very "thought logs" meant to expose it [364]. + +4.4.1.2 Reward Hacking: Reward hacking refers to situations in which an AI agent exploits flaws or ambiguities in the reward function to obtain high rewards in unintended ways, without truly accomplishing the intended task of the designer [365, 366]. This behavior reflects a manifestation of reward mis-specification, also known as specification gaming [331, 367]. Reward hacking has long been a concern in the field of AI safety [368]. The root of this problem can be understood through Goodhart's Law: "when a measure becomes a target, it ceases to be a good measure" [369]. 
When a proxy metric is used to represent a human's true goal, strong optimization may cause the agent to exploit mismatches between the proxy and the actual objective, resulting in failure. Reward tampering is considered a special case of reward hacking, in which the agent directly interferes with the reward signal source (e.g., + +by modifying the reward function) to obtain high rewards [370, 371]. + +With the widespread adoption of Reinforcement Learning from Human Feedback (RLHF) in training LLMs, reward models that rely on a single scalar value struggle to capture the complexity of human value systems [372, 373]. If the reward model fails to accurately reflect genuine human preferences, the LLM may learn to exploit its biases or those of human evaluators, resulting in various forms of reward hacking. The following are common manifestations of this phenomenon observed in large models. + +Sycophancy: Since LLMs are optimized for human preferences, or for reward models based on such preferences, during fine-tuning, they tend to prioritize satisfying users or human supervisors to maximize rewards, rather than adhering strictly to objective correctness. This tendency is reflected in the way their responses often shift to align with users' implied stances, catering to their preferences [374, 375]. + +Reward Overoptimization: Model outputs may be excessively optimized for specific formal features to satisfy the reward model. For example, the model may produce unnecessarily lengthy responses [376], as human preference for detailed answers during training leads the reward model to favor longer outputs. Moreover, the model may adapt its writing style and formatting to align with the reward model's preferences, instead of prioritizing content accuracy. For instance, it may learn to respond to harmful queries with overly cautious refusals [237, 377]. 
+ +# 4.4.2 Provably Safe AI System + +Provably safe AI systems represent an emerging paradigm that aims to ensure that advanced AI operates within rigorous, formally verifiable safety bounds. Some researchers argue that only by embedding mathematically verified safety proofs into AI architectures can we guarantee that such systems will never deviate into harmful behaviors [378]. This formal approach contrasts sharply with traditional empirical testing and red-teaming methods, which often fail to uncover all failure modes in complex or adversarial environments. The achievement of provable safety requires the integration of several key components [379] as follows: + +Formal Safety Specifications: A rigorously defined set of safety properties (e.g., "do no harm") must be articulated in a formal language. Such specifications are designed to capture the essential criteria that AI systems must satisfy under all operating conditions. + +World Models: To evaluate the consequences of AI actions, it is essential to build a world model that encapsulates the dynamics and causal relationships of the environment. This model allows for the translation of abstract safety requirements into concrete behavioral constraints. + +Verification Mechanisms: A verifier is needed to ensure that the AI system meets the safety specifications with respect to the world model, regardless of whether it is implemented as a formal proof certificate, a probabilistic bound or an asymptotic guarantee. Such mechanisms are the only reliable method to exclude the possibility of catastrophic failure by proving that certain harmful behaviors are mathematically impossible [378]. + +Robust Deployment Infrastructure: Beyond predeployment verification, runtime monitoring and redundant safety measures (such as provably compliant hardware) must be implemented. 
These safeguards ensure that if discrepancies between the world model and observed behavior occur, the system can transition to a safe state without human intervention [378, 379]. + +# 4.4.3 Beyond Fine-tuning, Systematic Safety + +AI governance encompasses the establishment and enforcement of regulatory frameworks necessary for the safe development and deployment of AI systems. Given the potential of AI to exacerbate societal biases [374, 380, 381], displace labor [382], and pose existential risks due to increasingly autonomous capabilities [15, 351], governance is critical. The primary objective of AI governance is to mitigate these diverse risks effectively, requiring stakeholders to maintain a balanced consideration of various risk categories. + +A multi-stakeholder approach characterizes contemporary AI governance, involving governments, industry and AI laboratories, and third-party entities such as academia and non-profit organizations [383]. Governments create regulatory frameworks, conduct oversight, and establish risk management systems [384, 385], while industries and AI laboratories undertake comprehensive risk assessments throughout AI development lifecycles and voluntarily adopt security measures [386, 387]. Third parties provide critical auditing services and policy advice, fostering international cooperation and balanced stakeholder interests [388, 389, 390]. + +Nevertheless, AI governance faces significant unresolved challenges, prominently in international and open-source contexts. International governance discussions emphasize the importance of global frameworks to manage catastrophic risks such as AI-driven arms races and inequitable distribution of AI benefits [388, 391]. Historically, international governance frameworks like the OECD AI Principles and the global ethical standards produced by the United Nations Educational, Scientific and Cultural Organization (UNESCO) offer instructive precedents [392, 393]. 
Conversely, open-source governance is debated regarding the balance between transparency's security benefits and potential misuse risks [394, 395]. Advocates argue that openness enhances security through rapid issue identification and reduces centralized control [396, 397], while critics highlight risks of malicious use and vulnerabilities from unrestricted access [260, 398]. This ongoing debate underscores the need for measured, risk-informed policies and gradual openness strategies [399, 400]. + +# 5 SAFETY IN MODEL EDITING & UNLEARNING + +Model editing and unlearning techniques can be conceptualized as lightweight adjustments to information and efficient safeguards for privacy and security during the deployment of LLMs. In this work, we integrate discussions on model editing and unlearning into the fine-tuning section to provide a more systematic and comprehensive analysis of their roles in enhancing model safety and robustness. + +Concretely, model editing [401, 402] and unlearning [403, 404, 405, 406, 407, 408] can be understood as methods + +to efficiently modify model parameters during deployment to enhance the model's security and privacy. To better reflect the comprehensiveness of our survey, we have included relevant literature on the safety of editing (Section 5.1) and unlearning (Section 5.2). It is noteworthy that there exists a certain degree of technical overlap between model editing and unlearning. To provide a clearer and more precise exposition, we focus model editing on addressing knowledge conflicts within the model, while unlearning is primarily concerned with the erasure of knowledge to ensure privacy protection. + +# 5.1 Safety in Model Editing + +LLMs retain incorrect or outdated information [409], and for this reason, model editing has emerged to advocate updating knowledge in LLM by modifying a small part of the parameters. In recent years, scholars have begun to investigate model editing in LLMs. 
Generally, model editing methods can be mainly categorized into gradient-based [410, 411], memory-based [412, 413] and locate-then-edit methods [414, 415, 416].

Gradient. Early approaches [410, 411, 417] advocate that the updating of knowledge in the LLMs is accomplished by modifying the gradient of the LLM. A more recent study [418] revisits gradient-based fine-tuning and demonstrates strong performance through constrained optimization techniques. However, since gradient-based methods are too complex and suffer from pattern collapse, they are gradually being replaced by other research lines [419, 420].
$\rightarrow$ Memory. Memory-based methods [412, 413] advocate the introduction of external parameters to assist in updating knowledge. Though effective, models with excessive parameters face the problem of over-parameterization – where the parameter space becomes significantly larger than necessary to capture the underlying data distribution [420, 421].
- Locate-then-edit. Locate-then-edit methods, represented by RoME [416], MEMIT [421] and AlphaEdit [402], which localize knowledge storage-related neurons by causal tracing and achieve knowledge editing by modifying these neurons, have made breakthroughs in recent years [422, 423, 424]. The locate-then-edit approach has been proven to be effective in updating specific factual knowledge in the LLM [402]. Thus it is widely used to edit the security of LLMs [425, 426]. In the following part, we will focus on the application of the locate-then-edit approach to the security domain.

Attack. Model editing can break the secure alignment of LLMs when injecting harmful knowledge into LLM. Chen et al. [425] first proposed the concept of editing attack, constructing a dataset named EDITATTACK, and using editing methods such as RoME [416] and IKE [427] successfully injected harmful, incorrect, and biased information into LLMs.
Since model editing modifies the corresponding knowledge in the form of knowledge triples, BadEdit [428] proposes a way to inject triggers using model editing. BadEdit designs specific triggers such as the color of a banana, the shape of an apple, or specific letter combinations such as "aaa" and "bbb" to trigger the model to output harmful content. Building on this basis, Concept-RoT [429] designs a more invisible approach by proposing $k_{0}$ based on the concept of context, and implanting a backdoor against the concept of context by editing the value corresponding to $k_{0}$, thus realizing the effect of a conceptual Trojan horse. In addition, DEPN [430] devised a method to first locate private neurons, and secondly edit the specified private neurons through RoME so that the model outputs sensitive private information.

Defense. Model editing can also be used as a means of improving the security of a model. Zhang et al. [426] proposed a model editing method named DINM, to localize and detoxify toxic neurons via model editing, making the model less susceptible to jailbreaking. In addition, other studies [422, 431, 432] have explored the use of model editing for blue teams. Model editing methods have made big strides

TABLE 5: Model Editing for attack and defense.

MethodsAttack?BackDoor?Defense?Parameter?
RoME[416]
IKE[427]--X
AlphaEdit[402]
BadEdit[428]X
ConceptROT[429]X
DEPN[430]XX
DINM[426]XX
PEM[432]XX
+

in red teaming, making them an effective means of injecting risk content into safely aligned models. We summarize the mainstream editing methods for attacks and defenses in Table 5, where each row represents the content covered by a distinct method. Against model editing attacks, no specific defense has yet been studied, so further exploration in this area is an important research topic.

# 5.2 Safety in Unlearning

LLMs have demonstrated remarkable capabilities in various tasks, but their training on vast and often unfiltered datasets from the Internet inevitably leads to the absorption of unsafe information [433, 434, 435, 436, 437, 438]. This includes biases [439], stereotypes [440], toxic language [441], misinformation [442, 443, 444], and even private data [71]. Therefore, LLM unlearning is crucial for ensuring their safe and responsible deployment [406, 445], as shown in Figure 6. Unlearning, in this context, refers to the process of selectively removing or mitigating the influence of specific knowledge, behaviors, or data points from a trained LLM [446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456]. Unlearning methods can be distinguished into two broad paradigms [457]: exact (certified) unlearning and heuristic (approximate) unlearning. Exact methods accurately identify poisoned data points or affected parameters, providing formal or statistical guarantees that the specified behaviors no longer influence the model. This typically requires certified retraining from scratch, removing the disallowed data entirely [458]. Two primary paradigms have emerged to achieve approximate unlearning: parameter-adjusting methods, which modify the model's internal weights, and parameter-preserving methods, which intervene externally without altering the core model architecture (refer to Figure 6).

Parameter-Adjusting Unlearning.
The first paradigm, which involves adjusting the model's parameters, is characterized by its direct intervention in the model's internal + +structure. This approach typically requires retraining or fine-tuning the model on a curated dataset, designed to counteract the unsafe knowledge or behavior that needs to be unlearned. It also encompasses methods that follow a locate-then-edit pipeline, where specific parameters associated with the target knowledge are identified and directly modified to achieve unlearning [456]. Techniques such as Gradient Ascent [459] and its variations [460] are commonly employed. While traditional fine-tuning using cross-entropy loss is prevalent, more specialized loss functions have been proposed to enhance the control over the outputs of unlearned models, such as KL minimization [461, 462, 463] and the IDK loss function [464]. Additionally, recent work [465] has reframed LLM unlearning as a preference optimization problem [466], utilizing Negative Preference Optimization loss to improve the unlearning process. In contrast to these training-intensive approaches, LaW [456] draws inspiration from model editing by identifying and removing knowledge associations embedded in MLP weights, aiming to eliminate targeted information with minimal impact on the model's overall capabilities. Given the recent powerful multimodal perception and generation nature of LLMs, MMUnlearner [467] proposes to reformulate the setting of multimodal unlearning, which aims at erasing the unwanted visual concept but still preserving textual knowledge. Based on existing multimodal LLM-based unlearning benchmarks [468, 469, 470], SafeEraser [471] further incorporates unlearning mechanism and evaluation into multimodal LLM safety, via introducing Prompt Decouple Loss and a new metric called Safe Answer Refusal Rate. + +![](images/7d17dc024ae8d367e87d8dd062de9a7bf8de5670470bc7baaeafe76bbb049324.jpg) +Fig. 6: The taxonomy illustration of LLM Unlearning for safety. 
+ +Parameter-Preserving Unlearning. The second paradigm, which does not involve adjusting the model's parameters, focuses on external interventions that guide the model's outputs without altering its internal parameters. Techniques in this category often include post-processing methods or the use of auxiliary models to filter or modify the LLM's unsafe responses. Editing-based techniques [430, 472, 473, 474] modify specific components of the model architecture or introduce additional modules to counteract unwanted knowledge. Task vector approaches [475, 476] + +leverage the geometric properties of the parameter space to identify and neutralize directions associated with targeted information. More recently, in-context learning strategies [477, 478] have emerged, which guide the LLM's behavior through carefully crafted prompts rather than weight modifications. + +Although heuristic methods are far more scalable, their guarantees are only empirical. Closing this gap between certified safety and practical feasibility remains a central research challenge for the field. + +# 5.3 Roadmap & Perspective + +# 5.3.1 Model Editing + +The evolution of model editing traces back to localized factual updates (e.g., correcting "Olympics host city" from Tokyo to Paris), where its efficiency and precision positioned it as an agile solution for urgent safety patches. Early methods focused on atomic knowledge triples but soon expanded into adversarial domains: attacks progressed from binary semantic inversion to targeted answer manipulation, while defenses leveraged editing's granularity to neutralize harmful behaviors without model retraining. Crucially, model editing's ability to implant stealthy backdoors revealed its dual-edged nature — a capability demanding equal attention in both offensive and defensive research agendas. + +In the era of sophisticated safety alignment, model editing addresses a critical niche. 
While safety fine-tuning establishes systematic safeguards through periodic retraining, it struggles with emergent, context-sensitive risks (e.g., geopolitical shifts or cultural updates) that evolve faster than retraining cycles. As LLMs scale, the intervals between alignment updates widen, creating safety gaps exacerbated by catastrophic forgetting risks. Model editing bridges these gaps through rapid surgical interventions — executing updates orders of magnitude faster than alignment procedures — by modifying specific unsafe knowledge or concepts, all while preserving general model stability. In summary, while safety fine-tuning remains essential for systematic alignment, model editing addresses four fundamental limitations in the current era: + +- Temporal Agility: Mitigates emergent, unpredictable safety risks that cannot wait for full retraining cycles. +- Granular Control: Enables surgical modifications to specific reasoning pathways in large reasoning models (LRMs), correcting flawed chain-of-thought logic without disrupting valid inference patterns. +- Resource Decoupling: Reduces computational barriers for safety-critical updates, particularly in multimodal settings where traditional retraining costs scale prohibitively. +- Stable editing: Model editing is an ongoing and iterative process; however, excessive modifications can compromise the model's performance, likely due to the intricate interdependencies among neurons. Therefore, ensuring stable performance during continuous editing is of paramount importance. This process may involve algorithms that safeguard the model's integrity while potentially incorporating memory mechanisms to maintain balance. In summary, altering the original model parameters is a relatively "risky" endeavor, and plug-and-play externals + +nal modules may emerge as the predominant approach in the future. + +Future frontiers highlight model editing's unique value proposition. 
Specifically, + +- More Hidden Backdoor: By precisely modifying targeted parameters without perturbing unrelated knowledge, edited backdoors evade traditional detection methods that monitor broader model behavior. +- Multimodal Safety: In multimodal systems, editing reduces the computational burden of aligning heterogeneous data streams by selectively modifying cross-modal attention mechanisms. +- Concept-Level Safety: Directly edit abstract safety concepts (e.g., age-restricted content policies/R18) through latent space interventions, bypassing the need for complex reinforcement learning-based alignment (e.g., DPO). +- Interpretability-driven Safety: The model editing's interpretability dimension further provides causal insights into safety-critical model behaviors, informing robust verification frameworks. + +Critically, model editing complements - rather than replaces - systematic alignment, forming a hybrid governance paradigm: systematic alignment ensures broad ethical guardrails, while model editing enables surgical adaptations to emerging threats, i.e., establishing a closed-loop governance system for sustainable safe deployment. Together, they will form the twin pillars of LLM safety in the future. + +# 5.3.2 Unlearning + +The concept of machine unlearning has evolved from a specialized issue in traditional machine learning to a key aspect of responsible AI governance for LLMs. Early efforts in unlearning primarily focused on removing data from smaller, more specialized models, often in response to privacy regulations such as the GDPR's "right to be forgotten" [446]. However, with the advent of LLMs—trained on vast, diverse, and often uncontrolled datasets—the landscape of machine unlearning has undergone significant transformation. This shift has introduced new challenges and imperatives that were previously unforeseen. 
+ +The initial phase of LLM unlearning focused on adapting existing techniques—primarily parameter-adjusting methods like gradient ascent [459] and fine-tuning variants [461, 462, 463, 464, 479]—to the scale and complexity of LLMs. While this phase demonstrated the feasibility of unlearning, it also highlighted several fundamental limitations, such as computational cost [445, 449], catastrophic forgetting [451], and lack of granularity [406]. These limitations have driven the development of more refined approaches, such as parameter-preserving methods [472, 475, 476, 477, 478]. These methods, which utilize techniques like task arithmetic and in-context learning, provide a glimpse of a future where unlearning can be achieved with greater efficiency and precision. The shift to multimodal LLMs has further expanded the scope, necessitating unlearning methods that can address the safety concerns arising from the interaction between different modalities [467, 468, 469, 470, 471]. The current landscape of LLM unlearning can be described as a shift from reactive “data deletion” to proactive “knowledge + +sculpting." We are moving beyond merely removing information to precisely shaping the model's understanding and behavior. This shift is driven by several key insights: + +- Unlearning as Preference Optimization: By framing unlearning as preference learning, we can align the model's output with desired safety and ethical guidelines, utilizing techniques like Negative Preference Optimization [465, 466] or safety-oriented preference optimization [480]. +- The Importance of Context: Since the "unsafety" of information is often context-dependent, researchers are developing methods to selectively unlearn behaviors in specific situations while maintaining the model's general capabilities [477, 481, 482, 483]. 
+
- Multimodal Unlearning: Addressing the fusion of modalities (text, images, audio) presents new challenges in removing unwanted concepts and behaviors both within and across modalities [467, 471, 484].

Looking ahead, several critical areas are essential for further advancement in the field:

- Principled Evaluation Metrics: Robust, standardized benchmarks are necessary to accurately assess unlearning effectiveness and potential side effects. These metrics should move beyond simplistic, easily manipulated measures [450, 476, 485, 486, 487].
- Theoretical Foundations: A deeper understanding of the mechanisms behind unlearning in LLMs is needed to develop truly reliable techniques [451, 488]. This includes exploring why unlearning is challenging and how different methods affect internal representations.
- Hybrid Approaches: Combining parameter-adjusting methods (for coarse-grained removal) with parameter-preserving techniques (for fine-grained refinement) presents a promising path forward. This aligns with the "hybrid governance paradigm" from Model Editing, allowing for both broad and precise interventions.
- Unlearning for Interpretability: Instead of using interpretability solely to guide unlearning, the unlearning process itself can be used to enhance our understanding of model behavior [489]. By selectively removing knowledge and observing the consequences, we gain causal insights into the model's reasoning. This represents a fundamentally different and more powerful use of unlearning—this is the key practical insight.
- Unlearning Benchmark: Building upon the aforementioned insight, it is evident that unlearning currently lacks a standardized benchmark. Establishing a method to effectively balance a model's ability to forget while systematically ensuring its performance remains reliable is crucial (Figure 7).
In the realm of multimodal learning, creating such a benchmark could be even more complex, potentially representing a pivotal step in advancing this field [471, 490, 491, 492, 493]. + +In conclusion, LLM unlearning is not merely a technical challenge; it is a fundamental requirement for building trustworthy and beneficial AI systems or even agent ecosystems [494, 495]. It is evolving from a reactive measure to a proactive design principle, shaping the very foundations of how LLMs learn, adapt, and interact with the world. The journey from "forgetting" to "knowledge sculpting" + +![](images/3a418fa605c423149f828c2f001b5edc46cfe6b96a344e91855296aab87fa433.jpg) +Fig. 7: We define the goal of unlearning as maximizing both model utility and forget quality, meaning that algorithms positioned closer to the top-right corner are considered more reliable. + +is underway, promising a future where LLMs can be both powerful and aligned with human values [496, 497, 498]. + +# 6 LLM(-AGENT) DEPLOYMENT SAFETY + +In this section, we focus on the safety of LLM and LLM-agent during the deployment phase, addressing three progressively broader dimensions: LLM Safety (Section 6.1), Single-agent Safety (Section 6.2), and Multi-agent Safety (Section 6.3). We begin by discussing the potential threats and defense mechanisms associated with the foundational LLM during inference. Subsequently, we explore the additional security risks introduced by supplementary modules, which impact both individual agents and multi-agent systems. This structured approach ensures a comprehensive understanding of safety challenges at varying scales of LLM(-agent) deployment. + +# 6.1 Deployment Safety + +The deployment of a single LLM introduces significant security challenges, including adversarial attacks, data privacy risks, and content integrity concerns. 
This subsection systematically examines these issues by first analyzing key attack vectors (Subsection 6.1.1), such as model extraction, membership inference, jailbreak attacks, prompt injection, data extraction, and prompt stealing, which threaten model confidentiality, robustness, and ethical compliance. Next, we explore defensive mechanisms (Subsection 6.1.2), including input preprocessing, output filtering, robust prompt engineering, and system-level security controls aimed at mitigating these threats. Finally, we discuss evaluation and benchmarking (Subsection 6.1.3), covering robustness, content safety, privacy leakage, multi-modal safety, and standardized security benchmarks, ensuring a comprehensive assessment of LLM deployment safety. This structure follows a logical progression from identifying threats to implementing defenses and establishing reliable evaluation methodologies. + +# 6.1.1 Attack in Deployment + +We first give an overview of the attacks in Figure 8. + +![](images/3c01c7964a0ec82961f085a41736bb2f4f02d485345f84dac051957673a31670.jpg) +Fig. 8: The overview of attacks in single LLM's deployment phase. + +Model Extraction Attacks. Model extraction attacks aim to steal a deployed language model, which only provides an Application Programming Interface (API) that processes text input (i.e., a prompt) and returns generated outputs. He et al. and Peng et al. [499, 500, 501, 502] made a series of early efforts in launching model extraction or stealing attacks against LLMs (even deployed as a service) and proposed various defense mechanisms to mitigate such risks. Carlini et al. [503] conducted the model-stealing attack against a black-box large language model by targeting its embedding projection layer. Building on this, Finlayson et al. [504] further investigated the risk of stealing embedding dimensions by exploiting the softmax bottleneck. Another line of research explores model extraction in a gray-box setting. For instance, Zanella et al. 
[505] demonstrated the feasibility of stealing high-fidelity language models when given access to a frozen or fine-tuned encoder. + +Another category of model extraction attacks focuses on recovering the full weight of an LLM. For instance, Horwitz et al. [506] successfully reconstruct a pre-fine-tuned LLM (i.e., the pre-trained model before fine-tuning) using its fine-tuned variants, such as low-rank adaptation (LoRA) models. Beyond general model-stealing attacks, some research explores threats to specialized capabilities. Li et al. [507] extract the coding abilities of an LLM, including code synthesis and translation. Additionally, Liu et al. [508] propose a theoretically grounded method for stealing any low-rank language model. + +Membership Inference Attacks. Membership Inference Attack (MIA) tries to figure out whether a given candidate is included in the training dataset of an LLM [117, 509]. + +Methods. [509] propose the first MIA with MIN-K% PROB, which identifies examples that contain few outlier words with low probabilities as non-members. Afterward, [510] propose MIN-K%++, which simulates the membership inference into identifying local maxima. Some works reveal that the success of MIAs against LLMs may be due to sampling non-members from different distributions. Thus, [511] propose Blind attack, which conducts MIA by applying a threshold and completely ignores the target model. [512] selectively combine the existing MIAs and aggregate their scores to perform a statistical test. [513] + +identify the membership of a verbatim text by constructing paraphrased options (with another proxy model) and asking the target LLM for true verbatim. [514] examine the relative change in conditional log-likelihoods when prefixing target data points with non-member context. [515] propose to generate noisy neighbors for a target sample by adding stochastic noise in the embedding space. 
[516] train a neural network to capture variations in output probability distributions between members and non-members.

$\rightarrow$ Document-level MIAs. Some works focus on document-level MIAs. Meeus et al. [517] propose the first MIA for document-level leakage, which contains four steps: retrieving, normalizing, aggregating, and predicting. After that, Meeus et al. [518] validate that it doesn't work against models that do not naturally memorize and propose to utilize copyright traps to detect the use of copyrighted materials. Puerto et al. [519] make exploration toward collection-level MIA against LLMs by computing features and two-stage aggregation.
Different Settings. Some works also explore the MIA risk in novel settings. Anderson et al. [520] propose the first MIA against Retrieval Augmented Generation (RAG) systems by directly asking whether one candidate is its member or not. Li et al. [521] compare the output semantic similarity of the sample for the RAG system and the remaining to determine the membership of RAG's database. Zhang et al. [522] propose the first MIA against in-context learning and four attack methods, including GAP, Inquiry, Repeat, and Brainwash. Meanwhile, Duan et al. [523] reveal that MIA risk in in-context learning is more severe than in the fine-tuning setting. Wen et al. [524] conduct membership inference of fine-tuning data by poisoning pretraining data and backdooring the pre-trained model. Then Wen et al. [525] comprehensively assess the MIA risk against adaptation methods, including LowRank Adaptation (LoRA), Soft Prompt Tuning (SPT), and In-Context Learning (ICL). Balloccu et al. [526] study the indirect data contamination for closed-source LLMs, which can also be regarded as MIA. Fu et al. [527] propose Self-calibrated Probabilistic Variation, which fine-tunes the reference model by prompting the target LLM.
$\rightarrow$ Factor Impact. Duan et al.
[117] find that the existing MIAs work poorly on LLMs due to massive training data and near-one epoch training. Li et al. [528] clarify the impact of fine-tuning and evaluation metrics and propose a three-phase framework (i.e. training, simulation, and confidence calculation) to assess membership leakage. Kandpal et al. [87] find that duplication of training data highly extends the risk of MIA. Naseh et al. [529] validate that using synthetic data in membership evaluations may lead to false classification results.

Jailbreak Attacks. Jailbreak attacks aim to induce the large language model to generate unsafe content like violence [260]. Jailbreak attacks focus on bypassing the safety rules, including system safety prompts and safety filters, while prompt injection attacks target all system prompts. A large body of literature has studied the vulnerability of LLMs, where different terms, including "jailbreak attack" and "red-teaming", all point to the same safety vulnerability that generates unsafe content. We classify them into two main categories, i.e. optimization-based and strategy-based.

Strategy-based jailbreaks figure out novel strategies or templates, which are pre-defined, to generate one adversarial prompt at a time to test LLMs' vulnerabilities. Thus, the generated prompt is non-evolvable. Specifically, useful strategies include persuasion [559], role-playing [560, 561, 562, 563], cipher [564, 565], ASCII [566], long-context [567], low-resource language [568, 569], in-context malicious demonstration [570], overloaded logical thinking [571], misspelling [572], multi-language mixture [573], rephrasing [538, 574, 575, 576], competing objectives and generalization mismatch [577], splitting sub-queries [578], zero-shot generation [579], personal modulation [580].

Optimization-based jailbreaks contain a multi-step optimization process to revise one unsafe prompt.
Here, we further divide the optimization-based jailbreaks into gradient-based and LLM-based ones: + +Gradient-based Optimization. GCG [260] appends one suffix to the target prompt, then utilizes the gradient of loss, which is calculated with the target (e.g., "Sure" or "Yes") and output, to optimize the soft prompt. Then, it greedily searches the best-matched tokens in the dictionary for soft prompt replacement. AutoDAN-B [535] solves the limited readability of GCG by constructing a proxy score where the perplexity is considered, which is utilized for greedy sampling. I-GCG [531] improves GCG by appending a template before the suffix and uses a multi-coordinate updating strategy and easy-to-hard initialization to optimize the suffix. COLD-Attack [581] adapts Energy-based Constrained Decoding with Langevin Dynamics for controllable adversarial prompt generation. MA-GCG [532] proposes momentum gradient to boost and stabilize the greedy search for tokens in adversarial prompts. A-GCG [533] introduces a smaller draft model than the target model to sample the promising suffix candidates for faster optimization. BOOST [582] enhances the existing jailbreak attacks by adding eos tokens to the end of the unsafe prompt. CRT [583] proposes an enhanced reinforcement learning-based jailbreak with consideration of prompt diversity. I-FSJ [584] deploys few-shot learning and demo-level random search. +$\Rightarrow$ LLM-based Optimization. PAIR [261] constructs a system prompt and uses an attacker LLM to generate and revise adversarial prompts. It also uses a Judge model to assess the feedback from the victim, which is further utilized for revising the adversarial prompt. AutoDAN-A [534] utilizes crossover strategies and LLM-based mutation to revise adversarial prompts into stealthy sentences. AutoDAN-Turbo [539] proposes to find useful strategies by prompting an LLM automatically.
ToA (Tree of Attack) [536] iteratively uses an LLM to transform the unsafe prompt into two variations and keeps the prompt variation that achieves a higher score. Xiao et al. [585] adopt a similar pipeline with PAIR [261] and introduce malicious content concealing and memory reframing. Puzzler [586] proposes defensive and offensive measures to conduct an indirect jailbreak. GPT-FUZZER [587] starts from human-written prompts, and uses templates and mutation to rewrite unsafe prompts. + +TABLE 6: A summary of attacks for LLM after deployment. Our evaluation includes representative studies that exemplify these security aspects. More details can be found in the main text. OS indicates whether the code is open-sourced. + +
AttacksMethodOSYearStrategySettingDatasetsTarget ModelsMetrics
Model ExtractionCarlini et al. [503]Yes2024Binary SearchBlack-boxNoneGPTs, LLaMA, Pythia,ada, babbageQuery&TokenCost, MSE, RMS
Finlayson et al. [504]No2024Softmax BottleneckBlack-boxNonePythia, GPT-3.5Query Cost
Zanella et al. [505]No2024Matrix OperationsGrey-boxSST-2, MNLI, AGNewsBERTs, XLNetQuery Cost, Acc,Agreement
Horwitz et al. [506]Yes2024Spectral DeTuningWhite-boxLoWRAViT, SD, MistralMSWE, SEM
Membership InferenceMIN-K% PROB [509]Yes2023ProbabilitiesBlack-boxWikipediaLLaMAs, Pythia, NeoX,OPTTPR, FPR, ROC,AUC
MIN-K%++ [510]Yes2024Local MaximaBlack-boxWikiMIA, MIMIRPythia, GPT-NeoXLLaMA, OPT, MambaAUROC, TPR, FPR
Blind [511]Yes2024ThresholdBlack-box8 setsGPT-3, OpenLLaMAAUC ROC
LLM-DI [512]Yes2024AggregationBlack-boxPILEPythiasAUC, p-values
DE-COP [513]Yes2024ParaphrasesBlack-boxarXiv:Tection, BookTectionMistral, Mixtral, LLaMA, GPTs, ClaudeAUC
Recall [514]Yes2024Log-LikelihoodsBlack-boxWikiMIA, MIMIRPythia, GPT-NeoXLLaMA, OPT, MambaAUC, TPR@FPR
Noisy [515]No2024Embedding NGBRsGray-boxOpenWebText,WikipediaGPT-2TPR, FPR, AUC
SMIA [516]No2024PerturbationGray-boxWikipedia, FANPythia, Pythia-Deduped, GPT-NeosAUC-ROC, TPR, FPR
FEATAGG [517]No2024Feature AggregationBlack-boxProjectGutenberg,ArXivOpenLLaMATPR@FPR, AUC
RAG-MIA [520]No2024Direct AskingBlack-boxHealthCareMagic,Enronflan, llama, mistralTPR@FPR, AUC-ROC
JailbreakGCG [260]Yes2023Gradient-basedWhite-boxVicuna, LLaMA-2AdvBenchASR, Loss
AmpleGCG [530]Yes2024Hybrid-basedWhite-boxVicuna, Llama-2,Mis-tral,GPTsAdvBenchASR, US, Diver-sity, Time
I-GCG [531]Yes2024Gradient-basedWhite-boxAdvBench,HarmBenchVICUNA, GUANACOLLAMA, MISTRALASR
MA-GCG [532]Yes2024Gradient-basedWhite-boxAdvBenchVicuna, MistralASR, Time
A-GCG [533]Yes2024Gradient-basedWhite-boxAdvBenchLlama2, VicunaASR, Acc
AutoDAN-A [534]Yes2023LLM-basedBlack-boxAdvBenchVicuna, MistralASR, Recheck,PPL
AutoDAN-B [535]Yes2023Gradient-basedWhite-boxAdvBenchVicuna, Guanaco, PythiaASR, Recheck
PAIR [261]Yes2023LLM-basedBlack-boxJailbreakBenchVicuna, Llama-2, GPTs,Claudes,GeminiASR, QPS
ToA [536]Yes2023LLM-basedBlack-boxAdvBench, Harm123Vicuna, Llama-2, PaLM-2,GPTs, Claude3, GeminiGPT4-MetricHuman-Judge
PAL [537]Yes2024LLM-basedBlack-boxAdvBenchLlama-2, GPT-3.5ASR, Manual Labeling
Masterkey [538]No2023RephrasingBlack-boxAdvBench, Harm123GPTs, Bing, BardASR, QSR
AutoDAN-Turbo [539]Yes2024LLM-basedBlack-boxHarmbenchLlama-2, Gemma, GPT-4,GeminiASR, StrongRE-JECT
FlipAttack [540]Yes2025RephrasingBlack-boxAdvBench, StrongRE-JECTGPTs, Claude 3.5 Sonnet, Llama 3.1 405B, Mixtral 8x22BASR
Geneshift [541]Yes2025LLM-basedBlack-boxAdvBenchGPTsASR
Prompt InjectionIPP [542]Yes2022HandcraftBlack-boxOpenAI Examplestext-davinciASR
Greshake et al. [543]Yes2023Data PoisoningBlack-boxNonetext-davinci, GPT-4None
HOUYI [544]Yes2023Components AsmblBlack-boxFive QueriesSUPERTOOLSManual
Yan et al. [130]Yes2023PoisoningBlack-boxSeveral CasesAlpacaNgt, Pst, Ocrc
TT [545]No2023GameBlack-boxTensor TrustGPTs, Claudes, PaLM, LLaMAsRobustness Rate
JudgeDeceiver [546]Yes2024Gradient-basedWhite-boxMT-Bench, LLMBarMRPC, Jfleg, HSOL,RTE, SST2, SMSMistral, Openchat, LlamasACC, ASR, PACKEY-E, LM-E
AUPI [547]Yes2024Gradient-basedWhite-boxMRPC, Jfleg, HSOL,RTE, SST2, SMSLlama2ASR
AUTOHIJACKER [548]No2024LLM-basedBlack-boxAgentDojo, OPILlama, Command-R,GPTsASR
Data Extractionzlib [108]Yes2020Generate & InferenceBlack-boxTop-n, Temperature, InternetGPT-26 metrics
AutoSklearn [549]No2023Greedy, Contrastive, Beam decodingBlack-boxPileGPT-NeoPrecision, Recall,R@FPR
DECOM [550]No2024DecompositionBlack-boxNYT, WSJFrontiersTRM, EMP,BITAP
Context [551]No2022Context, Zero-shot,Few-shotBlack-boxEnron CorpusGPT-NeoAcc
ETHICIST [552]Yes2023Prompt TuningGray-boxLM-ExtractionGPT-NeoRecall
Pli-compass [553]No2024GroundingBlack-boxEnron emailGPT-JExtraction Rate
DSP [554]No2024Dynamic Soft PromptingBlack-boxLMEB, The StackGPT-Neo, Pythia, Star-CoderBaseEER, FER, PPL
PWB [555]Yes2024Gradient-basedWhite-boxPilePythia, LlamaPrecision, AUC,TPR
Prompt StealingSha et al. [556]No2024LLM-basedBlack-boxRetrievalQA,AlpacaGPT4ChatGPT, LLaMAAcc, Precision, Recall, AUC
output2prompt [557]Yes2024LLM-basedBlack-box3 User & 3 SystemPromptsLlamas, GPTsBLEU, CS, Preci-sion, Recall
PRSA [558]No2024Output DifferenceBlack-boxCategory18GPTsBLEU, FastKAS-SIM, JS
+ +ECLIPSE [588] uses an LLM as a suffix generator and optimizer. PAL [537] proposes an online proxy model (which is used for adversarial prompt generation) training pipeline. + +$\Rightarrow$ Others. EnJa [589] proposes to ensemble prompt and token-level attack methods via a template-based connector. AmpleGCG [530] first collects lots of successful suffixes and then trains the generative model to generate a specific suffix for a given unsafe prompt. Zhao et al. [590] targets the scenario where the decoding process of target LLM is assisted with smaller models' guidance. + +Prompt Injection Attacks. Prompt injection is a vulnerability where an attacker manipulates the input prompts of LLMs to force them to generate a specific output, which is usually out of the range for normal use (e.g., goal hijacking and prompt leaking [542]), often by injecting malicious text or commands into the input field. Attackers can employ a variety of techniques to carry out such attacks. + +$\Rightarrow$ Direct Prompt Injection. Perez et al. [542] directly inject handcrafted adversarial prompts into inputs to misalign the language model. HOUYI [544] proposes an injection generation framework which includes three components. Yan et al. [130] utilize LLMs to generate diverse trigger instructions that implicitly capture the characteristics of trigger scenarios. TENSOR TRUST leverages the TENSOR TRUST web game to generate a large-scale dataset and benchmark [545]. AUPI [547] adopts a gradient-based optimization method, specifically, a momentum-enhanced optimization algorithm, to generate universal prompt injection data. Upadhayay et al. [591] argue that LLMs suffer from cognitive overload and propose to use in-context learning to jailbreak LLMs through deliberately designed prompts that induce cognitive overload. Kwon et al. [592] circumvent security policies by substituting sensitive words—likely to be rejected by the language model—with mathematical functions. 
+$\Rightarrow$ Indirect Prompt Injection. Greshake et al. [543] propose to indirectly inject prompts into the data that are likely to be retrieved. Bagdasaryan et al. [593] design a prompt injection attack against multi-modal LLMs, by generating an adversarial perturbation corresponding to the prompt and blending it into an image or audio recording. Neural Exec [594] designs a multi-stage preprocessing pipeline for cases like Retrieval-Augmented Generation (RAG)-based applications. PoisonedAlign [595] boosts the success of prompt injection attacks by strategically creating poisoned alignment samples in the LLM's alignment process. TPIA [596] crafts non-functional perturbations that contain malicious information and inserts them into the victim's code context by spreading them into potentially used dependencies like packages or RAG's knowledge base. F2A [597] proposes to use feign security detection agents to bypass the defense mechanism of LLMs. AUTOHIJACKER [548] uses a batch-based optimization framework to handle sparse feedback and leverages a trainable memory to enable effective generation. +Different Settings. JudgeDeceiver uses gradient-based optimization to inject LLM-as-a-Judge scenarios [546]. Pedro et al. [598] study the risk of injections targeting web applications based on the Langchain framework. Lee et + +al. [599] propose a human-AI collaborative framework to explore the potential of prompt injection against federated military LLMs. PROMPT INFECTION [600] proposes to make malicious prompts self-replicate across interconnected agents in multi-agent systems. Zhang et al. [601] explore the risk of prompt injection in LLM-integrated systems like LLM-integrated mobile robotic systems.
It starts from sufficient-length prefixes to perform extraction and additional measures to determine if extracted texts are valid. + +$\Rightarrow$ Methods. In the beginning work [108], the proposed extraction process contains two stages "generate-then-rank": sampling potentially memorized examples and membership inference. It proposes a temperature-decaying method to sample more diverse examples and use surrogate models to infer the membership. After that, Al-Kaswan et al. [549] propose using greedy, contrastive, and beam decoding strategies to generate examples and use a classifier to infer the membership. Su et al. [550] propose an instruction decomposition technique to extract fragments of training data gradually. Huang et al. [551] extensively explore the effect of context, zero-shot, and few-shot methods in extracting the personal email address. ETHICIST proposes a smoothing loss and a calibrated confidence estimation method to extract the suffix and measure the confidence [552]. Nakka et al. [553] improve the extraction performance by grounding the prefix of the manually constructed extraction prompt with in-domain data. Wang et al. [554] propose to train a transformer-based generator to produce dynamic, prefix-dependent soft prompts. Ozdayi et al. [105] introduce an approach that uses prompt tuning to control the extraction rates of memorized content. Meng et al. [602] propose a two-stage method, i.e., collection and ranking, to recover PII when PII entities have been masked. +Different Settings. Some works also explore the risk of data leakage in novel settings. Wang et al. [555] study the probability of data extraction in fine-tuning settings and Bargav et al. [603, 604] extract the training data by comparing the output difference before and after the fine-tuning. Jiang et al. [605, 606, 607] propose to extract the private Retrieval-Augmented Generation (RAG) documents. Peng et al.
[608] extract the private RAG documents by poisoning in the fine-tuning process. Nasr et al. [107] explore the potential risk of data extraction for the aligned production language models. Panda et al. [609] extract the fine-tuning secret data by poisoning the pretraining dataset. Lu et al. [610] propose to extract PII from an aligned model with model merging. Chen et al. [611] find that fine-tuning can recover the forgotten PIIs in pretraining data. Panchendrarajan et al. [612] propose to extract the whole private training data in the fine-tuning process. Rashid et al. [613] propose selective weight tampering to explore PII leakage in Federated Language Models. Dentan et al. [614] extract data from layout-aware document understanding models like unimodal or bimodal models. +Different Applications. Leveraging the abnormally high + +token probabilities, some works utilize the memorization of LLMs to extract the fingerprint or steganography [615]. Al-Kaswan et al. [616] explore memorization in large language models for code and find that code models memorize training data at a lower rate than natural language models. Nie et al. [617] utilize the token-level features derived from the identified characteristics to decode the PII. Lehman et al. [618] reveal the risk of Electronic Health Records leakage of LLMs. Diera et al. [619] conduct experiments to assess the PII leakage of fine-tuned BERT models and found that Differential Privacy (DP) has a negative effect when deployed in fine-tuning. Zhang et al. [620] propose data extraction attacks against text classification with transformers. Huang et al. [621] propose an evaluation tool, i.e. HCR, to assess the PII leakage in Neural Code Completion Tools. + +$\Rightarrow$ Factor Assessment. Some work studies the factors of data extraction including decoding schemes, model sizes, prefix lengths, partial sequence leakages, and token positions [622, 623]. Yash et al.
[624] explore the effects of prompt sensitivity and access to multiple checkpoints to extraction attacks. Staab et al. [625] construct a dataset consisting of real Reddit profiles to extract personal attributes. Xu et al. [626] conduct experiments to evaluate the factors of different suffix generation methods and different membership inference attacks in extraction performance. Karamolegkou et al. [627] evaluate the effect of model structure, data type, probing strategies, and metrics. + +Prompt Stealing Attacks. Given that crafting effective prompts requires significant engineering effort and can be considered valuable intellectual property (IP), prompt-stealing attacks aim to compromise this IP by reconstructing prompts from generated responses [556, 557, 558]. These generation effects are often used to attract prospective buyers. Sha et al. [556] pioneer this approach by collecting a dataset and training classifiers to predict prompt parameters—such as whether the prompt is direct, role-based, or in-context. They then used a large language model (LLM) to reconstruct the prompt. Similarly, Zhang et al. [557] trained an LLM on output-prompt pairs to directly infer the original prompt, while Yang et al. [558] leveraged generation differences to refine surrogate prompts. However, recovering the original prompt solely from the output is challenging. To address this, Zheng et al. [628] propose a timing-based side-channel method to infer the prompt during inference. + +# 6.1.2 Defensive Mechanisms in Deployment + +In Subsubsection 6.1.1, we analyzed various attack scenarios targeting individual LLM deployments. However, in real-world applications, defense mechanisms are not designed as isolated, one-to-one countermeasures against specific attacks. Instead, they follow fundamental security principles to establish a systematic defense framework, as illustrated in Figure 9.
This framework integrates multiple layers of protection, ensuring resilience against a wide range of adversarial threats while maintaining model usability and efficiency. + +Input Preprocessing Defenses. Input preprocessing serves as the first line of defense in LLM deployment, aiming to + +![](images/f4ce229cb80a8c96656ded2aa655c39cbe884dd49ac1e9e0e1650264348f5451.jpg) +Defensive Mechanisms in Deployment +Fig. 9: The overview of defensive mechanisms in single LLM's deployment phase. + +detect and neutralize adversarial inputs before they reach the model. + +Attack Detection & Identification: Effective input filtering [629, 630] begins with attack detection [631], which identifies adversarial prompts through statistical [632], structural [633], or behavioral inconsistencies [634]. Gradient-based detection methods [635] leverage safety-critical gradient analysis and loss landscape exploration to uncover jailbreak prompts that manipulate LLM behavior. These approaches identify adversarial inputs [636, 637] by analyzing how small perturbations [638] affect model outputs, detecting highly sensitive or misaligned gradients that indicate targeted attacks. Perplexity-based methods [632] measure the probability distribution of input sequences, flagging atypical or low-likelihood prompts as potential adversarial inputs. These techniques are particularly effective in detecting prompt injection and adversarial perturbations, where crafted prompts deviate significantly from natural language distributions. + +Beyond individual heuristics, universal detection frameworks [639] integrate multiple detection strategies to counter diverse attack vectors, including prompt injection [640], backdoor manipulations [641], and adversarial attacks [637]. These frameworks employ ensemble-based filtering mechanisms, combining gradient analysis [642], perplexity estimation [643], and syntactic evaluation for generalized attack resilience.
+ +Semantic & Behavioral Analysis: Attack detection alone is insufficient, as certain adversarial inputs may bypass traditional filtering mechanisms. Semantic [644] and behavioral analysis enhance input preprocessing by evaluating linguistic intent and model alignment. Self-examination techniques allow LLMs [645, 646] to assess whether they are being manipulated, leveraging auxiliary reasoning steps to detect deceptive prompts. Alignment-based verification [647] ensures that the model's responses remain consistent with its safety objectives [330], identifying inputs that subtly nudge the model toward policy violations or ethical misalignment. Intention analysis [648, 649] further refines input filtering by discerning subtle manipulations designed to bypass explicit security checks. Unlike token-level detection, which flags overtly adversarial inputs, intention-aware defenses analyze the semantic structure and purpose of the input to preemptively reject jailbreak attempts. + +Adversarial Defense & Mitigation: When detection and behavioral analysis fail to fully neutralize adversarial inputs, robustness-enhancing techniques [647] mitigate their effects by reducing model susceptibility to manipulation [334, 650]. Semantic smoothing [651, 652] techniques + +introduce controlled randomness into LLM responses, reducing the model's sensitivity to adversarial perturbations and preventing reliable jailbreak execution. By stabilizing decision boundaries [653], these methods enhance resistance against prompt manipulation strategies that exploit response predictability. + +Preemptive input transformations [654], such as back-translation [655] or paraphrasing, modify incoming queries [651] while preserving semantic intent, disrupting adversarial structures embedded within malicious prompts. 
Data augmentation [656] and adversarial training further strengthen model robustness by exposing LLMs to adversarial prompts during training, forcing them to learn invariances that reduce their vulnerability to real-world attacks. + +Output Filtering Mechanisms. Output filtering mechanisms [212, 657] serve as a critical safeguard in LLM deployment, ensuring that generated responses comply with safety constraints while preserving informativeness. Unlike input preprocessing, which aims to prevent adversarial prompts from reaching the model, output filtering mitigates harmful content post-generation. Existing approaches primarily follow three paradigms: rule-based constraints, generative adversarial filtering, and toxicity detection. + +Rule-based mechanisms [658] impose predefined constraints on model outputs, preventing the generation of harmful, unethical, or undesired content. Programmable guardrails [659] offer a structured framework where developers can enforce response filtering, topic restriction, and ethical alignment. These methods often integrate reinforcement learning from human feedback [155] or rule-based reward [660] modeling to refine output safety. While effective at handling explicit violations, static rule-based methods struggle with nuanced adversarial prompts and subtle misalignments. + +To address these limitations, generative adversarial filtering [661] leverages self-critique [662, 663], ensemble detection, and dynamic response evaluation [664]. Self-rectification mechanisms [663, 665] enable LLMs to critique their own outputs and refine responses through iterative refinement. Additionally, ensemble-based [666] moderation models aggregate predictions from multiple LLMs, improving robustness against circumvention techniques. 
Adaptive filtering frameworks [667] employ perplexity-based assessments and adversarial perturbation detection to flag responses deviating from expected linguistic patterns, enhancing their resilience against jailbreak attempts [668, 669] and toxic content injection. + +Toxicity detection [670, 671, 672] and content moderation [673, 674, 675, 676] further reinforce output safety by identifying and mitigating hate speech [677], misinformation, and other harmful content. Supervised finetuning adapts LLMs to recognize undesirable patterns, while classifier-based detection models [678] filter responses in real-time. Some approaches introduce debiasing strategies, such as controlled decoding [679, 680] and anti-expert guidance [681], to suppress toxic outputs without sacrificing response diversity. However, these methods face challenges in balancing false positives and false negatives, particularly in ambiguous or context-dependent cases. + +The effectiveness of output filtering hinges on its ability to balance strict control with linguistic flexibility, ensur + +ing that models remain both safe and practically useful. A hybrid approach combining rule-based safeguards, self-correcting mechanisms, and adaptive toxicity moderation is essential to achieving robust and scalable LLM deployment. Robust Prompt Engineering. Robust prompt engineering aims to enhance LLM safety by designing input prompts that resist adversarial manipulation [682], protect sensitive data, and mitigate harmful outputs—all [683] without modifying model parameters. These strategies act at the interaction level, offering lightweight and model-agnostic protection. + +Recent efforts have introduced prompt optimization techniques grounded in adversarial robustness, including embedding-space manipulation and defensive objective alignment. 
Methods such as Robust Prompt Optimization [684] and Prompt Adversarial Tuning generate transferable suffixes [668] or prefix [685] embeddings to guide model behavior [686] under attack [687], effectively lowering jailbreak success rates while preserving task performance. Similarly, goal prioritization frameworks [688] enforce inference-time objective consistency, dynamically resolving conflict between user instructions and safety constraints without requiring access to malicious samples. Complementary to these strategies, patch-based methods integrate interpretable suffixes or structured self-reminders [689] into prompts, reducing the model's susceptibility to coercive inputs through lightweight, modular defenses. + +Structural manipulation approaches [690] neutralize adversarial intent through prompt rewriting. Spotlighting [691] injects source-attribute signals to counter indirect prompt injection, while inverse prompt engineering [692] repurposes attack data to generate task-specific defensive prompts under the principle of least privilege. + +Privacy-preserving prompt [693] design introduces formal guarantees through differential privacy. Approaches like DP-Prompt [694] and stochastic gradient masking [695] reduce information leakage from prompts without harming performance. Desensitization and directional control of incontext representations offer additional privacy protections during prompt construction. Prompt engineering [579, 696] also helps mitigate societal risks. Chain-of-thought prompting and guided templates reduce gender bias [697] in reasoning tasks, while prompt learning [698] improves toxicity detection and generation control [699, 700], often surpassing specialized models in efficiency and generalization. + +Finally, systematic prompt optimization methods [701, 702] aim to generalize prompt robustness across tasks and domains. 
Techniques like BATPrompt [703] and StraGo [704] use adversarial simulation and strategic decomposition to refine prompts iteratively, improving both resilience and effectiveness under variable inputs. + +System-level Security Controls. System-level defenses [705] enhance LLM deployment by optimizing inference, enforcing alignment, isolating untrusted inputs, and securing the supply chain. Systems like Petals [706], Sarathi-Serve [707], and DistServe [708] restructure computation to improve throughput and latency, while TriForce [709], Medusa [710], and MagicDec [711] accelerate generation via speculative decoding and structural compression. Parallel frameworks such as DeepSpeed-FastGen [712] and SpecExec [713] further boost + +efficiency with minimal overhead.
+ +# 6.1.3 Evaluation and Benchmarks in Deployment + +To assess the reliability and safety of LLMs after deployment, evaluation efforts focus on several key dimensions and risk types, as illustrated in Figure 10. These dimensions guide the design of systematic benchmarks and metrics tailored for real-world deployment settings. + +![](images/7e5d6796694a8d1a706054c8a700440e2e26505bd752ab4e4efae519e5f05197.jpg) +Fig. 10: The overview of evaluation and benchmarks in single LLM's deployment phase. + +Robustness Evaluation. To systematically assess the relia- + +TABLE 7: Summary of LLM robustness benchmarks at the deployment stage. + +
BenchmarkAdversarialNaturalJailbreakToxicity
JailbreakBench [306]
HarmBench [305]
JAMBench [725]
JailbreakEval [726]
Latent Jailbreak [727]
PromptRobust [728]
SelfPrompt [729]
Chen et al. [730]
Chu et al. [731]
AdvGLUE [732]
AdvGLUE++ [333]
NoiseLLM [733]
NEO-BENCH [734]
CompressionEval [735]
+ +bility of large language models (LLMs) after deployment, we categorize robustness evaluation into two broad types: adversarial robustness and natural robustness. Adversarial robustness focuses on evaluating how LLMs respond to malicious or adversarial inputs, such as jailbreak prompts, prompt injections, or red-teaming attacks. Natural robustness, on the other hand, assesses LLM behavior under nonmalicious but realistic distribution shifts, including typos, paraphrasing, novel word usage, or temporal drift. A summary of representative benchmarks categorized along these 4 dimensions is presented in Table 7. + +Adversarial Robustness: A range of benchmarks and frameworks have been proposed for adversarial robustness. JailbreakBench [306] provides a standardized evaluation suite for jailbreak attacks, containing 100 misuse behaviors and an evolving repository of adversarial prompts. HarmBench [305] proposes a comprehensive red-teaming evaluation framework that includes 510 harmful behaviors spanning diverse semantic and functional categories, supporting both text-only and multimodal inputs across 33 LLMs. JAMBench [725] targets the evaluation of moderation guardrails using 160 carefully constructed prompts across four major risk categories and introduces a cipher-character-based attack. JailbreakEval [726] offers a unified toolkit for jailbreak assessment with string-matching, classifier-based, and LLM-based evaluators. Latent Jailbreak [727] focuses on detecting embedded malicious intent in seemingly benign prompts and evaluates instruction-following robustness using a hierarchical annotation scheme. PromptRobust [728] benchmarks prompt-level robustness with character, word, sentence, and semantic-level perturbations across 13 datasets and 8 NLP tasks. SelfPrompt [729] enables autonomous robustness evaluation through knowledge-guided prompt generation and LLM-based self-assessment. Chu et al. 
[731] conduct a large-scale comparison of 17 jailbreak attacks on 8 LLMs and 160 forbidden prompts, proposing a unified taxonomy and benchmarking various defenses. Chen et al. [730] propose a multi-dimensional framework assessing jailbreak reliability over 13 LLMs and 1,525 prompts, integrating metrics such as attack success rate (ASR), toxicity, fluency, and grammaticality. Zhang et al. [736] propose a novel definition and benchmark for LLM's content moderation based on a sensitive-semantic perspective.
BenchmarkHallucinationFactualityToxicityBiasDiscrimination
HaluEval [738]
Med-HALT [739]
ANAH [740]
SelfCheckGPT [741]
DoLa [742]
Mundler et al. [743]
Elaraby et al. [744]
Ji et al. [745]
Zhang et al. [746]
Guo et al. [747]
RTP-LX [748]
ROBBIE [749]
CEB [750]
+ +bustness, a key dimension of deployment-stage evaluation concerns the trustfulness and fairness of LLM-generated content. This includes detecting and mitigating outputs that are factually incorrect (hallucinations), misleading (low factuality), harmful (toxic), or unfair (biased or discriminatory). We categorize existing benchmarks into five axes: hallucination, factuality, toxicity, bias, and discrimination, and summarize representative works in Table 8. + +Benchmarks in this space target either the accuracy of generated content or its alignment with human values. For hallucination and factuality evaluation, HaluEval [738] and MedHALT [739] provide reference-based hallucination annotations in general and medical domains, respectively, while ANAH [740] delivers fine-grained, human-annotated hallucination labels with correction spans. SelfCheckGPT [741] detects hallucinations via consistency checks across multiple generations, and DoLa [742] proposes a decoding strategy that contrasts internal layer activations to reduce factual errors. Other works such as Mundler et al. [743], Elaraby et al. [744], and Ji et al. [745] leverage taxonomic definitions or internal model signals to quantify or predict hallucination risk. Zhang et al. [746] introduce FEWL, a reference-free evaluation framework that uses agreement across reference LLMs to approximate hallucination likelihood. + +In terms of toxicity detection, Guo et al. [747] show that role-playing prompts (persons) can elicit toxic behavior from ChatGPT, and RTP-LX [748] evaluates multilingual LLMs in detecting culturally sensitive harm. Both studies reveal that current LLMs remain vulnerable to subtle toxic or culturally biased outputs, especially in low-resource languages or when confronted with indirect harm. 
+ +For evaluating social bias and discrimination, ROBBIE [749] benchmarks LLMs across 12 demographic axes with template-based prompts and multiple toxicity and regard metrics, covering gender, race, religion, and intersections thereof. CEB [750] proposes a compositional taxonomy for fairness evaluation and introduces multiple new datasets spanning stereotyping, toxicity, and classification bias, supporting both direct and indirect evaluation modes. + +These benchmarks collectively provide a multidimensional view of content trustfulness and fairness, enabling the systematic evaluation of LLMs beyond syntactic correctness or surface fluency. As safety-critical deployment scenarios become increasingly prevalent, such evaluation tools play a central role in ensuring the responsible use of LLMs. + +Data Privacy and Leakage Evaluation. Data privacy is + +TABLE 9: Summary of privacy evaluation benchmarks for LLMs at the deployment stage. + +
BenchmarkPIIMIAEIACompliance
PrivLM-Bench [751]
LLM-PBE [752]
PrivAuditor [753]
Rossi et al. [754]
Whispered Tuning [755]
ProPILE [103]
PrivaCI-Bench [756]
Commercial Audit [757]
LessLeak-Bench [758]
SecureSQL [759]
DecodingTrust [333]
+ +a critical dimension in evaluating the trustworthiness of LLMs at deployment. Table 9 summarizes representative benchmarks that assess privacy risks along four axes: personally identifiable information (PII) leakage, membership inference attacks (MIA), embedding inversion attacks (EIA), and regulatory or contextual compliance. + +PrivLM-Bench [751] and LLM-PBE [752] offer comprehensive multi-level evaluations spanning all three major attack types. PrivAuditor [753] and Rossi et al. [754] focus on adaptation-stage vulnerabilities across a variety of finetuning techniques. Whispered Tuning [755] proposes a differential privacy-based training scheme to reduce leakage, while ProPILE [103] tests whether LLMs can reconstruct sensitive information from prompts related to known users. + +PrivaCI-Bench [756] and Commercial Audit [757] emphasize regulatory compliance, evaluating model behavior against privacy expectations and legal frameworks such as GDPR and the EU AI Act. SecureSQL [759] examines leakage in structured query generation, and LessLeak-Bench [758] reveals code-specific leakage across software engineering benchmarks. Finally, DecodingTrust [333] includes privacy as part of a broader trustworthiness suite, auditing GPT models across multiple risk dimensions. + +Together, these benchmarks provide a foundation for assessing LLM privacy risks across diverse modalities, attack surfaces, and deployment scenarios. + +Multi-modal Safety Evaluations As multimodal large language models (MLLMs) become increasingly integrated into real-world applications, ensuring their safety under diverse input conditions is essential. A growing number of studies have proposed evaluation benchmarks and frameworks to assess MLLM vulnerabilities across multiple dimensions [760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780, 781, 782]. 
+ +Jailbreak evaluation has received significant attention, with benchmarks such as MM-SafetyBench [760] and Jailbreakv-28k [761] targeting harmful instruction-following behaviors. MMJ-Bench [762] and Retention Score [763] further extend jailbreak assessment to include visual robustness and long-term safety retention. For hallucination, several works diagnose MLLM failures arising from inconsistencies between visual inputs and generated text, including HallusionBench [764], POPE [765], and Bingo [766]. SIUO [767] complements this direction by evaluating cross-modality consistency under seemingly benign inputs. + +Robustness under adversarial visual corruption is assessed in MVTamperBench [768] and B-AviBench [769], + +which introduce perturbed or misleading visual stimuli to test model stability. Meanwhile, fairness and social bias have been evaluated through VIVA [770], GenderBiasVL [771], FACET [772], FairDeDup [773], CounterBias [774], PAIRS [775], DeAR [776], and MMBias [777], covering gender, racial, and intersectional dimensions using parallel image sets, counterfactual probing, and real-world dataset imbalances. + +To unify these evaluation directions, several comprehensive frameworks have emerged. MultiTrust [778] and SPAVL [779] aim to benchmark MLLMs across diverse safety criteria, including robustness, fairness, and harmfulness. Q-Eval-100K [780] complements these efforts by focusing on visual generation quality and alignment under instruction-following settings. + +Collectively, these benchmarks highlight the unique challenges posed by multimodal interactions and the growing need for holistic, scalable safety evaluations tailored to MLLMs. + +# 6.2 Single-agent Safety + +In this section, we focus on security issues related to a single agent. We first define an agent as an interactive entity that uses an LLM as the core for reasoning, decision-making, and reflection while integrating memory, tools, and the environment as capability-enhancing components. 
Beyond the deployment risks associated with the LLM core, we introduce the security issues arising from these three additional modules. Specifically, for tools (Section 6.2.2) and memory (Section 6.2.3), we summarize existing work from both attack (Section 6.2.4) and defense (Section 6.2.5) perspectives to identify technical paradigms. For the environment (Section 6.2.6), we explore unique security challenges from the perspective of various agent-interaction settings. We demonstrate an overview of agent safety in Figure 12. + +# 6.2.1 Definition of Agent + +LLM-driven agent refers to an AI system capable of operating independently or with limited human oversight, where a sophisticated language model [6, 783, 784, 785] serves as the foundational intelligence for processing inputs, executing tasks, and engaging in interactions. By leveraging advanced natural language understanding and generation, such agents [29, 786, 787, 788, 789] can analyze information, resolve queries, and adapt to user or environmental inputs [790, 791, 792]. To extend their functionality, they frequently incorporate supplementary mechanisms—such as data storage modules [23, 793, 794, 795], external software interfaces [790, 796, 797], or strategic reasoning frameworks [798]—allowing them to transcend basic text production. This adaptability makes them valuable for diverse implementations, including interactive dialogue systems [799], workflow optimization [800, 801, 802, 803], and complex decision-making scenarios [804]. In this study, we focus on deconstructing agent safety into three critical dimensions: tool utilization, memory management, and environment-specific security concerns. We demonstrate the components and structures of agent systems in Figure 11. + +# 6.2.2 Tool Safety + +Some works enable LLM agents to learn how to use tools by generating datasets and fine-tuning the model for API usage [25, 805]. 
Specifically, tools can be implemented in various forms, including but not limited to code-based API functions (e.g., search engine [806] and calculator), embodied intelligence like robotic arms [807], and more. A tool serves as a bidirectional medium: on one hand, it allows the agent to map internal decisions into actions within the interactive environment; on the other hand, it also acts as a means for the agent to collect information from the external world. Given the pivotal role of tools in agent components, the related security issues are worth exploring [74]. For example, in the field of web security, Fang et al. [808, 809] investigate how autonomous agents, when equipped with appropriate tools, can independently compromise websites and exploit one-day vulnerabilities in real-world systems without human intervention. Next, we will summarize and discuss existing research from attack perspectives and figure out the lack of tool invocation defense in current research. + +Attacks. Based on the target of the attack, safety-related attacks involving tools can be categorized into Tool-aided Attacks and Tool-targeted Attacks. The former refers to attackers utilizing agents equipped with tools to execute attacks that LLMs cannot independently assist with, such as leveraging agents with web access and code execution capabilities to facilitate cyberattacks. The latter involves attackers targeting the tool invocation process itself, attempting to manipulate or induce tool selection for malicious purposes through various attack methods. However, from the perspective of the technical stack of attacks, the two can be unified. We have identified new applications of traditional LLM attack methods in tool safety, as well as novel attack paradigms that have emerged due to the unique characteristics of tools. + +Jailbreak. 
Similar to jailbreak methods in LLM safety, agent jailbreak also bypasses the agent's built-in safety mechanisms through specific prompts to elicit malicious responses. However, in the agent scenario, the malicious behaviors it aims to induce are different. Specifically, Cheng et al. [810] manually craft jailbreak prompts to extract personal information from the training data of code-generation agents. In contrast, Fu et al. [811] and Imprompter [812] both employ gradient-based optimization like GCG [260] to automatically generate input prompts or images that manipulate agents into leveraging tools for privacy breaches in dialogues or executing harmful actions on user resources. + +Injection. This type of attack can be summarized into two forms of injection: Prompt Injection (similar to LLM safety vulnerabilities) where malicious instructions are embedded in input data, exploiting the difficulty LLMs face in distinguishing between instructions and data. Another form is Tool Injection where malicious tools are injected to enable further exploitation, such as using the tool to execute malicious actions. For example, BreakingAgents [813] utilizes human-crafted prompt injections to execute malfunction attacks, causing agents to engage in repetitive or irrelevant actions, with additional exploration into the propagation of such attacks within Multi-Agent Systems (MAS). ToolCommander [814] is the second type. It proposes a two-stage + +![](images/cf762587d56c382c4c037d7bf5aac6c071b7d0e9976abb0ab003388647d5eb60.jpg) +Fig. 11: The overview of LLM-based single-agent and multi-agent systems. + +attack strategy: first, injecting malicious tools to steal user queries, and subsequently manipulating tool selection using the stolen data, thereby achieving privacy theft and denial-of-service attacks. + +Backdoor. 
Backdoor attacks also find utility in the context of agent safety, but unlike LLMs, LLM agents develop diverse verbal reasoning traces through continuous environmental interactions, broadening potential backdoor attack vectors. Yang et al. [815] define two types of backdoor attacks, targeting either the final returned results or the intermediate processes of the attacking agent, and implement the above variations of agent backdoor attacks on two typical agent tasks, including web shopping and tool utilization. Furthermore, DemonAgent [816] decomposes a backdoor into multiple sub-backdoor fragments to poison the agent's tools. Beyond intentional guidance, studies such as BadAgent [817] highlight that backdoor attacks can inadvertently prompt agents to misuse tools for malicious purposes. + +Manipulation. This type of attack refers to directly or indirectly manipulating or altering the tool's returned content to leak sensitive information or carry out malicious actions. AUTOCMD [818] employs a separate LLM, trained on tool-calling datasets and fine-tuned with target-specific examples, to generate and replicate legitimate commands for extracting sensitive information from tools. Meanwhile, Zhao et al. [819] manipulate third-party API outputs by injecting malicious content or omitting critical information, ultimately causing erroneous or biased system behaviors. + +Defenses. Compared to attacks on agent tools, defense mechanisms for secure tool invocation have been less studied. Specifically, AgentGuard [820] employs LLM orchestrators to automatically detect unsafe tool-use workflows and produce safety constraints for secure tool utilization. PrivacyAsst [821] proposes an encryption-based solution by integrating an encryption scheme into the tool using LLM agents to safeguard user privacy and align them + +with computational security standards. 
In addition, some works enhance the security of agent systems by leveraging tool invocation: GuardAgent [822] pioneers an approach to verify target agents' trustworthiness by executing guardrail code through API calls during task plan implementation. 
+
+# 6.2.3 Memory Safety 
+
+The memory mechanism in LLM agents enables them to retain historical behaviors, thereby enhancing future decision-making capabilities. Typically, agent memory can be categorized into long-term and short-term memory systems. The long-term memory module commonly employs Retrieval-Augmented Generation (RAG) [823, 824] technology to facilitate precise information retrieval, while the short-term memory stores real-time data to support immediate conversational contexts and task execution. While these memory modules significantly improve agent functionality, they simultaneously introduce potential security vulnerabilities, making the system susceptible to malicious attacks. 
+
+# 6.2.4 Attack 
+
+Following the trustworthiness issues in [74], we categorize attacks related to memory into three types: Memory Poisoning, Privacy Leakage, and Memory Misuse. 
+
+(I) Memory Poisoning refers to adversarial attacks where malicious data is injected into an agent's long-term memory [313, 825, 826, 827, 828, 829]. When the agent retrieves and utilizes such corrupted memory, it may produce erroneous outputs, misleading responses, or even hazardous actions. For example, the PoisonedRAG framework [827] employs a dual optimization approach, simultaneously manipulating both the retrieval and generation pipelines to systematically poison the agent's memory system. AgentPoison [826] introduces an advanced backdoor attack methodology that optimizes trigger patterns and seamlessly integrates them into query formulations, significantly elevating the likelihood of malicious sample retrieval 
+
+![](images/a2b149c02628f0cb46be90a88c408bf263d347f8b42ba68b6b83ded7364f1a70.jpg) 
+Fig. 
12: The overview of the safety of LLM-based agent systems. 
+
+while maintaining stealth. (II) Privacy Leakage occurs when attackers exploit the interface between an agent and its long-term memory to extract stored sensitive data [520, 605, 607, 830, 831]. Such breaches may expose user information to malicious third parties, posing significant real-world risks. (III) Memory Misuse refers to the deliberate construction of multi-turn query sequences that systematically circumvent safety protocols by exploiting the retention properties of agent short-term memory [752, 832, 833, 834, 835, 836]. This attack vector enables progressive erosion of defensive measures through iterative interaction patterns. 
+
+# 6.2.5 Defense 
+
+To counter these attacks, various defense approaches have been developed to enhance the robustness of memory systems [520, 835, 837, 838, 839]. (I) Detection mechanisms primarily focus on identifying and eliminating malicious content retrieved from long-term memory systems [835, 838, 839]. (II) Prompt Modification involves strategically rewriting user queries before processing by the agent to enhance response safety [520, 835]. (III) Output Intervention involves real-time monitoring and modification of agent responses prior to delivery to ensure safety and accuracy [825, 840]. 
+
+# 6.2.6 Environment Safety 
+
+Agents operate within dynamic and heterogeneous environments, spanning physical and digital domains [841, 842, 843]. Their interaction with these environments is a multistep process [844, 845]. First, agents engage in perception, gathering data from sources like sensors in a physical setup or digital platforms [806]. This perceived data is then analyzed using various algorithms and reasoning mechanisms to identify patterns and potential actions [846]. 
Based on this analysis, agents take action, which can either directly influence the environment, like an autonomous vehicle making 
+
+a lane change [847], or modify their own internal state, such as a software agent updating its knowledge base [848]. 
+
+However, this interaction is plagued by trustworthiness challenges. There are security risks in every process of interaction with the environment [849]. Agent roles and environmental constraints contribute to risks such as autonomous driving errors [850] and network disruptions [806, 851]. Given the diverse dynamic scenarios and related issues [849, 852, 853], the existing solutions are fragmented and lack a systematic framework. Thus, we will explore trustworthiness and security aspects by categorizing relevant papers according to whether they focus on ensuring safety in the perception, reasoning, or action phase of the agent-environment interaction, as illustrated in Figure 13. 
+
+Perception. The perception phase serves as the foundational layer of agent-environment interaction, where agents acquire raw data to interpret their surroundings. However, this phase is inherently vulnerable to risks such as data poisoning, environmental noise, and biased observations. Hudson [841] converts real-time sensory inputs into natural language representations augmented with security validation protocols, employing causal analysis techniques to improve reliability during adversarial perception scenarios. ChatScene [847] develops safety-oriented simulation environments for autonomous systems by converting linguistic commands into executable code compatible with CARLA's simulation architecture. Chen et al. [854] systematically categorize perceptual vulnerabilities in financial AI systems, identifying three primary risk categories: synthetic data generation errors, temporal inconsistency challenges, and susceptibility to engineered input manipulations. 
+
+Reasoning. 
The reasoning phase transforms raw perceptual data into actionable insights through decision-making models, and knowledge-based inference. This stage is critical to ensure agents act appropriately in dynamic environments, + +![](images/9deb0bb23bd9a7df4575cdd26e5b8aac051c2140a99729831eb7c59ed428f59b.jpg) +Fig. 13: The overview of agent and environment interactions. + +but introduces unique trustworthiness challenges. Yang et al. [846] develop a temporal safety verification framework using formal logic systems, implementing dual mechanisms for auditing the compliance of safety protocols and filtration of hazardous decisions to meet the requirements of industrial robotics. Agents4PLC [855] establishes an industrial control programming framework that combines automated code synthesis with formal verification processes, integrating RAG [235] and COT [343] to ensure operational integrity. Xiang et al. [822] propose medical AI systems that employ semantic reasoning engines for confidential data protection. Park et al. [845] demonstrate improved threat detection capabilities through simulated organizational communication patterns in anomaly identification systems. + +Action. The action phase represents the culmination of agent-environment interaction, where agents execute decisions to influence their surroundings or update internal states. Trustworthiness at this stage hinges on ensuring that actions are safe, precise, and aligned with intended objectives. Fang et al. [851] reveal the capacity of autonomous systems to exploit digital infrastructure weaknesses through adaptive penetration testing, prompting the development of specialized evaluation frameworks for web agents. Furthermore, researchers develop frameworks to evaluate the truthfulness of web agents. Polaris [856] implements distributed AI architectures to enhance fault tolerance and response accuracy of healthcare interaction systems. La et al. 
[857] employ linguistic evolution models to simulate adaptive content generation patterns that circumvent automated moderation systems, providing insights for regulatory mechanism improvements. + +# 6.3 Multi-agent Safety + +In the previous section, we explored security issues in a single agent setting and this section expands the discussion to multi-agent systems (MAS) [58, 71, 858, 859, 860, 861]. Since a single agent has limited problem-solving capabilities and a relatively narrow perspective, it struggles to conduct a comprehensive analysis of complex problems. In contrast, in MAS, agents can interact through various mechanisms, such as cooperation, competition, and debate, enabling them to solve complex problems more efficiently and effectively [862]. However, these interactions also introduce more complex and diverse security challenges [863]. Consequently, compared to single-agent systems, MASs face more severe + +and intricate security risks [864]. Similarly, we summarize and discuss existing research from both attack and defense perspectives. + +# 6.3.1 Attack + +In MAS, security threats primarily stem from the propagation of harmful information, hallucinations, and biases through agent interactions, as well as the coordinated planning and optimization of attacks to target security agents within the system. These threats can arise spontaneously through the unintended amplification of misinformation or be deliberately orchestrated by malicious agents. Attack strategies in MAS often integrate multiple traditional techniques, such as prompt injection, jailbreak, and adversarial attacks, while also exploiting emergent properties of agent communication and collaboration. This multi-faceted nature makes MAS attacks more covert, adaptive, and challenging to detect and mitigate. Moreover, the dynamic and autonomous nature of agents allows adversaries to refine their attacks in real-time, further complicating defense mechanisms. 
Below, we summarize the key research related to these threats. + +Transmissive Attack. It spreads within the MAS like a virus, propagating dangerous and harmful information, including covert malicious content, continuously attacking and compromising the agents in the system. Agent Smith [829] uses adversarial attack techniques, harmful images are generated—appearing benign on the surface but embedding malicious information. These images propagate within the MAS, causing agents to be compromised and posing significant security risks. CORBA [865] introduces Contagious Recursive Blocking Attacks, which exhibit transmissibility across any topological network and can continuously drain computational resources. Lee et al. [600] introduce Prompt Infection in MAS, including data theft, scams, misinformation, and system-wide disruption, which spreads silently. Similarly, Tan et al. [866] use multimodal malicious prompts to infect other secure agents, compromising their security. + +Interference Attack. This attack focuses on how it interferes with and disrupts interactions within the MAS, emphasizing communication disruption and misinformation, which affect information transmission within the MAS and lead to a decline in its defensive capability. NetSafe [867] conducts extensive experiments, analyzing and revealing their structural dependencies and adversarial impacts. At the same time, Huang et al. [868] study how the resilience of MAS varies between different downstream tasks, system structures, and error types; Agent-in-the-Middle [869] manipulates and intercepts information in agent interactions through intermediary agents, disrupting the communication mechanism. The experiment validates the harm caused by the interruption of interactions by intermediary agents through a comparison of MAS with different topological structures. + +Strategic Attack. 
Strategic attack involves collaboration between agents and strategic optimization of attack methods, aiming to emphasize the cooperation and long-term impact of the attack, making it increasingly dangerous and more destructive. Evil Geniuses [870] modifies system roles, where these roles collaborate to generate malicious prompts. By simulating adversarial attacks and defenses, + +they optimize and evaluate each round of attack behavior, making the attacks increasingly dangerous to target other agents. Amayuelas et al. [871] use adversarial attack techniques to enable harmful agents in the multi-agent system to collaborate in debates to persuade other secure agents. These malicious agents may exploit superior knowledge, larger model sizes, or greater persuasion power to gain an unfair advantage. Ju et al. [872] form a multi-agent community using a two-stage attack method: persuasive injection and knowledge manipulation injection, to induce agents to spread counterfactual and harmful knowledge. + +# 6.3.2 Defense + +In response to the various attack methods mentioned above in multi-agent systems, many effective defense strategies have emerged that can be applied to MAS. Currently, many studies focus on forming agent groups to collaborate in joint defense and designing specific defense mechanisms, such as multi-round or multi-layer checks and filtering, to ensure the safety of the responses output by the MAS. Alternatively, defense can be achieved by identifying harmful agents through the propagation of malicious information and eliminating malicious sources. + +Adversarial Defense. This type of defense focuses on attack-defense confrontation, leveraging this adversarial mechanism to develop more effective defense methods or mechanisms to enhance the security of the MAS. 
LLAMOS [873] employs adversarial defense techniques, where defensive agents and attacking agents engage in counterinteractions, with neither fully defeating the other, thereby enhancing the robustness of the defense and improving the MAS's overall defensive capability. AutoDefense [874] proposes that agents collaborate to complete defense tasks through adversarial prompt filtering, primarily focusing on filtering harmful prompt information from LLMs. In addition to using adversarial techniques for defense, defense can also be achieved by forming a multi-agent group to engage in debates. + +Consensus Defense. To better leverage the advantages of MAS, Consensus Defense utilizes agent collaboration and consensus building for defense, employing voting, debates, and evidence-based reasoning mechanisms to establish a defense system and enhance the security of the MAS. Chern et al. [875] propose that toxicity can be reduced through multi-agent debates, and the widespread use of multi-agent interactions can lead to marginal improvements. Similarly, BlockAgent [876] proposes a Proof-of-Thought consensus mechanism that combines stake-based miner designation with multi-round debate-style voting, enabling BlockAgents to facilitate multi-agent collaboration through a structured workflow. Audit-LLM [877] proposes a pair-wise Evidence-based Multi-agent Debate mechanism, designed to defend against hallucinations by forming a MAS to detect internal threats. This approach is divided into three components: task decomposition, tool construction, and the final execution of the MAS, ultimately reaching consensus through reasoning. + +Structural Defense. Structural Defense treats the MAS as a network structure for planning defense methods, using graph analysis techniques to detect anomalies and resist attacks while incorporating knowledge from other domains + +to enrich defense strategies in MAS. 
G-Safeguard [878] compares agents in MAS with various topological structures to nodes in a graph, using Graph Neural Networks (GNN) [879, 880] to detect anomalies in the agents' dialogue graphs and counter adversarial attacks and misinformation within the MAS. + +# 6.4 Agent Communication Safety + +As Large Language Model (LLM)-based Agents evolve from isolated entities into interconnected MAS, the mechanisms governing communication between Agents, and their interactions with external environments and tools, have become increasingly critical. Agents exchange information and collaborate through message passing, tool invocation, and environmental interactions; these mechanisms, while essential to system functionality, also expose significant attack surfaces. Early methods [881, 882, 883, 884, 885, 886, 887] of Agent interaction often relied on ad-hoc approaches, such as shared memory [888], API calls [889] or unstructured function calls [890], leading to fragmented systems lacking unified security considerations. To address this challenge and enhance interoperability, standardized communication protocols have emerged. Examples include Anthropic's Model Context Protocol (MCP) [891] for Agent-tool interactions, Google's Agent2Agent (A2A) [892] for enterprise-level Agent collaboration, and the Agent Network Protocol (ANP) [893] for open network interoperability, along with other commonly used protocols [894, 895, 896, 897, 898, 899, 900, 901, 902, 903, 904]. However, the open design and dynamic nature of these communication mechanisms, coupled with the autonomy of the Agent, has exposed new vulnerabilities while enhancing functionality. + +# 6.4.1 Attack + +The interconnected nature of MAS, facilitated by numerous communication channels, creates a multifaceted attack surface. 
While individual Large Language Models (LLMs) possess inherent vulnerabilities, the interactions and communications among Agents introduce novel threats that exploit the system's collaborative dynamics. These threats target various components, including communication channels, content interpretation, and underlying protocols, with examples such as Shadowing Attacks, Naming Attacks, Context Poisoning, and Rug Pulls. + +Attacks Communication Channels. These attacks directly disrupt the transmission and routing of messages in the system, affecting both inter-Agent communications and interactions with external endpoints. For instance, Agent-in-the-Middle (AiTM) attacks [869] specifically target the core communication mechanisms of LLM-MAS. By intercepting and manipulating messages between Agents, these attacks can cause Agents to perform unintended actions, thereby compromising the entire system. Such attacks underscore the critical security vulnerabilities arising from the communication-dependent nature of Agent collaboration. Furthermore, attacks targeting communication channels and transmission processes, such as communication perturbation [905], involve adversaries injecting noise into messages in transit [906] or masquerading as legitimate sources [907], thereby compromising both the efficiency and security of Agent collaboration. + +Attacks Content. These attacks target the content of messages themselves, leveraging the mechanisms by which Agents process and interpret received information. For example, Prompt Injection involves embedding malicious instructions into data or content that Agents retrieve or receive through communication channels, thereby manipulating the Agent's behavior or decision-making processes. This technique is discussed in several works, such as [600] and [543]. Additionally, [908] explores indirect Prompt Injection within tool-based scenarios, highlighting the varied strategies employed in complex environments. 
+ +Attacks Exploiting Multi-Agent Dynamics. These attacks leverage the interconnected structure, interaction patterns, or collective behavior of communication-driven Multi-Agent Systems (MAS) to amplify their impact or achieve strategic objectives. Contagious attacks (propagation) initiate malicious behavior on a single agent and spread it across the entire network via inter-agent communication [829, 865]. Additionally, malicious agents can coordinate through collective communication to achieve harmful goals, such as replicating malicious instructions across the network by sending replication code or commands, thereby leading to the sharing of legitimate communication keys or identity information with other malicious entities [909]. + +# 6.4.2 Defense + +To tackle threats to Agent communication, research proposes a multi-layered defense strategy addressing key points across the communication pipeline, from infrastructure to Agent-level processing. These defenses aim to prevent, detect, or mitigate attacks on channels, content, infrastructure, dynamics, and environmental factors. The strategies integrate into infrastructure and protocol design, individual Agents' message processing, and the collaborative and learning mechanisms of the MAS. + +Protocol Defenses. Protecting the foundation of Agent communication. This includes adopting standardized protocols with built-in security features (encryption, integrity checks, authentication) To counter Agent communication threats, research proposes multi-layered defense strategies targeting different points in the communication pipeline, from the underlying infrastructure to Agent-level message processing. Effective defenses aim to prevent, detect, or mitigate attacks on communication channels, content, infrastructure, such as MCP [891], A2A [892], ANP [893] standards. Establishing managed registries and identity systems for Agent and Tool/Service registration and identity management. 
Enforcing strong Agent identity verification and access control policies, including JIT credential provisioning. Implementing mechanisms to enforce communication dynamics, and environmental impacts. + +Content Defense. These defenses operate at the agent level, focusing on how agents process received messages and content. This includes input modification and filtering, which preprocess incoming content to neutralize adversarial elements. Agents also employ active defense mechanisms, such as reliability estimation, to assess the trustworthiness of messages based on local context, thereby mitigating the impact of untrusted information. For example, [910] proposed an active defense strategy that utilizes a reliability estimator to judge the credibility of received messages and + +employs a decomposable message aggregation policy network to reduce the influence of unreliable messages on the final decision. + +# 6.5 Agent Safety Evaluation + +Currently, there is already a substantial body of work evaluating the performance of LLM-based agent systems on different tasks [911, 912, 913, 914, 915]. In this section, we focus on benchmarks designed to assess the security of agents. Broadly speaking, these benchmarks include those that construct datasets and those that use other agents to set up sandbox environments for evaluation, each with distinct assessment priorities and specific scenarios for agent security [314, 916, 917, 918, 919]. + +TABLE 10: Benchmarks for agent safety. + +
BenchmarkDynamicLLM asEvaluatorEvaluation Focus
InjectAgent [920]Prompt Injection
AgentDojo [849]Prompt Injection
AgentBackdoorEval [816]Backdoor
RiskAwareBench [921]Embodied Agent
RedCode [916]Coding Agent
S-Eval [917]General
Bells [918]General
AgentSafetyBench [922]General
AgentSecurityBench [?]General
AgentHarm [923]General
R-Judge [314]General
ToolSword [924]
PrivacyLens [919]Privacy
ToolEmu [925]Tool
HAIEcosystem [926]General
SafeAgentBench [927]General
JailJudge [928]Jailbreak
+ +# 6.5.1 Attack-Specific Benchmarks + +This type of benchmark focuses on testing the security of an agent when facing specific types of attacks, such as Prompt Injection [600, 929], Backdoor [817, 930, 931], and Jailbreak [874, 932]. Specifically, InjectAgent [920] evaluates LLM agents' vulnerability to indirect prompt injection attacks, measuring behavior safety when tool-integrated agents process malicious instructions embedded in external content, with hacking prompts as an enhancement. A similar work is AgentDojo [849], a dynamic, extensible evaluation framework for assessing prompt injection attacks and defenses in LLM agents by simulating realistic tasks (e.g., email management, banking) with stateful environments and multi-tool interactions under adversarial conditions. As for backdoor attacks, AgentBackdoorEval [816] includes five real-world domains (including Banking-Finance, Medical, and Social Media) with automatically generated prompts, simulated tools, and tailored backdoor triggers to assess attack stealth and effectiveness. Besides, JailJudge [928] introduces a comprehensive jailbreak evaluation benchmark featuring a voting JailJudge MultiAgent, a comprehensive JailJudgeTrain dataset, and a trained Jailjudge Guard. + +# 6.5.2 Module-Specific Benchmarks + +Currently, these benchmarks for evaluating the security of a specific module in an agent focus on the invocation of tools [933, 934, 935, 936]. For example, ToolSowrd [924] evaluates + +LLM safety in tool learning across three stages (input, execution, output) by designing six adversarial scenarios (e.g., malicious queries, noisy tool misdirection, harmful feedback). ToolEmu [925] employs an LM-emulated sandbox to simulate diverse high-stakes tool executions and scenarios, leveraging GPT-4 for both tool emulation and automatic safety/helpfulness evaluations. 
+ +# 6.5.3 General Benchmarks + +In addition to the previously mentioned benchmarks that focus on a specific aspect of agent security, some efforts have developed more comprehensive and holistic evaluation frameworks, taking into account diverse scenarios, different agents, and various offensive and defensive techniques. For instance, AgentSafetyBench [922] assesses LLM agent safety through 2,000 test cases across 349 interactive environments, covering 8 risk categories (e.g., data leaks, physical harm) and 10 failure modes (e.g., incorrect tool calls, risk unawareness), with automated scoring via a fine-tuned model. Similarly, AgentSecurityBench [?] is a comprehensive framework that formalizes and evaluates attacks (e.g., Direct/Indirect Prompt Injection, Memory Poisoning) and defenses across 10 scenarios, 10 agents, and 13 LLM backbones, using 7 evaluation metrics. SafeAgentBench [927] evaluates embodied LLM agents' safety awareness with 750 diverse tasks (detailed, abstract, long-horizon) in SafeAgentEnv simulation environment, leveraging GPT-4 for task generation and dual evaluators (execution-based and semantic). HAIEcosystem [926] evaluates safety through multi-turn interactions between human users (benign/malicious) and AI agents across 132 scenarios, using modular sandbox environment and LLM-based dynamic risk measurement. AgentHarm [923] tests agent robustness by evaluating compliance with 110 explicitly malicious multi-step tasks across 11 harm categories, using synthetic tools and fine-grained grading rubrics. Different form previous benchmarks, RiskAwareBench [921] focuses on embodied agents, evaluating physical risk awareness via four modules: safety tip generation, risky scene generation, plan generation, and automated evaluation. + +# 6.5.4 LLM Deployment Roadmap + +In the deployment of LLMs under frozen parameters, the security landscape has evolved through a tightly coupled dynamic among attacks, defenses, and evaluation mechanisms. 
+ +Initially, black-box attacks leveraged the generative capabilities of LLMs themselves to optimize adversarial prompts, often without precise alignment to the decision boundaries. In contrast, gradient-guided white-box methods offer greater control but face inherent limitations due to the discrete nature of token spaces resulting in prompts with weakened semantic fidelity. These attack trends have catalyzed the emergence of prompt-level defense strategies. To counter black-box attacks, recent defenses adopt prompt shaping and system-level constraints to guide and restrict the model's response behavior. For gradient-based attacks, defenses typically apply perplexity-based detection and semantic consistency checks to identify suspicious or adversarial outputs. + +The growing sophistication of defenses reshaped the requirements for evaluation. Static, one-shot rejection mechanisms have proven insufficient in multi-task and multimodal deployments, prompting the development of dynamic strategies such as response rewriting, hierarchical permission control, and consensus-based filtering across multiple models. These strategies demand richer evaluation protocols beyond single metric assessments, shifting toward behavior metrics that capture cross-input consistency, risk under specific task conditions, and adaptability to strategy switching. + +As the attack-defense interaction intensifies, the evaluation itself has become a critical driver of system evolution. Recent frameworks have introduced automated red teaming pipelines, enabling a closed-loop process where jailbreak samples are continually generated, tested against deployed defenses, and fed back to guide both adversarial strategies and defense refinement. This has laid the groundwork for a new paradigm in LLM security research: one where attack, defense, and evaluation are no longer treated in isolation but co-evolve as an interdependent, self-reinforcing system. 
+ +# 6.5.5 LLM Deployment Perspective + +(1) Attack strategies will become more structured and semantically aligned. (i) Black-box attacks may evolve through agent-based optimization, enabling sentence-level jailbreaks with clearer intent and higher success rates. (ii) To overcome the limitations of token-level gradient attacks, future work may focus on generating semantically consistent adversarial prompts that are less detectable by perplexity-based defenses. (iii) Open-source models will serve as surrogates for closed models, allowing attackers to replicate decision boundaries before launching white-box attacks. (iv) Variants from fine-tuning pipelines may leak private information through cross-model comparison, introducing version-aware privacy risks. + +(2) Defenses will shift toward adaptive and transferable mechanisms. (i) Prompt-based defenses will evolve into context-aware controllers that adjust behavior based on input semantics and task context. (ii) Generalizable defenses that work across domains and languages will be critical for scalable deployment. (iii) Future systems may support online updates, enabling continuous refinement in response to new threats. +(3) Evaluation will act as both a diagnostic and driving force. (i) Benchmarks must expand beyond text to cover multimodal inputs and tool-based actions. (ii) Multi-objective evaluation will replace single-metric scoring, balancing safety and utility through trade-off analysis. (iii) Static test sets will give way to adaptive, streaming benchmarks that evolve with attack trends. (iv) Automated red teaming will close the loop, enabling real-time attack generation, evaluation, and defense adjustment. + +# 6.5.6 Agent Roadmap + +Agent. The evolution of LLM-based agents originated from role-playing paradigms [801, 937, 938, 939], where researchers investigated organizational structures, role allocation mechanisms, and implementation workflows for task-oriented agents in various social contexts. 
These systematic explorations not only demonstrated agents' potential in + +addressing human societal challenges but also spawned interdisciplinary research programs spanning sociology, organizational theory, and psychology. As the field advanced, research focus shifted toward automated agent workflows [795, 860, 940, 941], domain-specific methods for embodied intelligence, and the development of agent capabilities in tool utilization and memory management. Through this progression, agent systems have emerged as a transformative paradigm for automating human social processes, gaining significant recognition as a viable solution for complex societal automation. + +The rapid advancement of agent capabilities and architectures has brought safety concerns to the forefront of academic and industrial research. These challenges span multiple critical dimensions: tool safety, memory security, and the agent's fundamental operational integrity. Inheriting both the capabilities and vulnerabilities of their underlying LLM foundations, agents intrinsically carry these "genetic" weaknesses into more complex operational environments. This inheritance makes safety vulnerabilities particularly acute in agent systems, especially when handling sensitive real-world applications involving personal privacy and financial assets. The development of agent technologies has thus become inextricably linked with safety considerations. Recent years ( $\sim$ 2023- until now) have witnessed accelerated research in agent safety, focusing on four key frontiers: + +- Agent Brain Security: The core decision-making mechanisms. +- Tool Invocation Safety: Secure external API and tool usage. +- Memory Retrieval Protection: Robustness against memory poisoning. +- Communication Protocol Security: Safe multi-agent interactions. + +Emerging work has also begun addressing safety challenges in embodied agent scenarios, marking an important expansion of the research domain. 
+ +# 6.5.7 Perspective + +We outline potential future research directions for agent systems and analyze their developmental trajectory: + +(1) Safety of External Agent Modules. Unlike standalone LLMs, agents interact with external modules (e.g., tools, memory), which are exposed to open environments and thus more vulnerable to attacks. Key research challenges include: (i) Tool Safety: Secure tool invocation and API usage to prevent adversarial exploitation. (ii) Memory Protection: Robustness against memory poisoning and unauthorized access, to name just a few. These external interfaces introduce unique attack surfaces, making their security a critical research priority. +(2) Stability and Reliability of Dynamically Updated Agents via Reinforcement Learning: As reinforcement learning (RL) [35, 942, 943] techniques become increasingly integrated with LLM-based agents, these systems are being deployed in more complex and dynamic environments. While this integration enhances agents' adaptability and intelligence, it also introduces significant risks: (i) Emergent Threats: Advanced RL capabilities may inadvertently enable agents to learn and propagate harmful behaviors or danger- + +ous information. (ii) Dynamic Vulnerability: Continuous online learning increases exposure to adversarial perturbations or reward hacking. + +Critical Research Directions: (i) Safe RL Frameworks: Developing constrained optimization methods to bound agent behavior within ethical and operational guardrails. (ii) Stability-Aware Updates: Designing update protocols that balance adaptability with robustness (e.g., catastrophic forgetting mitigation). (iii) Anomaly Detection: Real-time monitoring of learning trajectories to identify and neutralize hazardous knowledge acquisition. 
+ +(3) Safety of Embodied Agents in Domain-Specific Scenarios: As autonomous agents become increasingly deployed across specialized domains, their safety considerations must account for unique domain-specific vulnerabilities. We list some key challenges as follows: + +Web Agents: +- HTML/JS injection risks during automated browsing +- Secure sandboxing requirements for DOM manipulation +- Cross-site scripting (XSS) vulnerabilities in automated form-filling + +- Communication Agents: + +- Protocol-level attacks (e.g., SIP flooding, WebRTC exploits) +- End-to-end encryption requirements for sensitive dialogues +- Authentication bypass in voice-based agents + +Robotics Control Agents: + +- Physical safety constraints in actuator commands +Real-time collision avoidance verification +- Emergency stop mechanism reliability + +Healthcare Agents: + +Medical decision audit trail requirements +- Drug interaction verification systems + +# 7 SAFETY IN LLM-BASED APPLICATION + +In this section, we focus on the security considerations that should be addressed following the commercialization of LLMs into practical applications. With the rapid development of LLMs in fields such as content creation, intelligent interaction, automated programming, medical diagnosis, and financial analysis, LLM-based applications are reshaping industry workflows and business models [944]. However, while LLMs significantly enhance productivity and facilitate human-machine collaboration, their large-scale deployment has also introduced severe security challenges [66]. Ensuring the security, reliability, and compliance of LLM-based applications has become a critical issue in AI research and real-world implementation. + +Truthfulness. Despite their powerful text generation capabilities, LLMs exhibit hallucination phenomena, generating inaccurate, misleading, or entirely fictitious content [945, 946, 947, 948, 949]. 
Unlike traditional errors, hallucinations are often subtle and linguistically plausible, making them especially dangerous in real-world applications. This challenge is exacerbated in high-stakes domains such as healthcare, law, and finance, where misleading AI-generated information can directly affect human safety and economic + +![](images/792d0e5c90e63607687a3b7c2093f939694dd3631a5e08614fac6eb7112e1843.jpg) +Fig. 14: We illustrate the diverse applications of AI in enterprise productivity, content generation, programming, healthcare, finance, customer support, education, and cyber-security. We also highlight critical issues related to truthfulness and privacy, including data leakage, security threats, property rights, fairness, and regulatory compliance, underscoring the need for robust safeguards in AI deployment + +stability. For example, an LLM-powered clinical assistant may suggest nonexistent diseases or cite unverified treatments, posing risks to patients [739, 950], while financial advisors powered by LLMs might generate persuasive but flawed market forecasts, leading to significant capital misallocation or systemic financial vulnerabilities [951]. Specifically, hallucination is not merely a surface-level output flaw but a systemic artifact rooted in the model's training dynamics and the nature of its data. Specifically, hallucination can stem from three compounding factors: (1) semantic overgeneralization due to exposure to noisy, unverified, or synthetic pretraining corpora; (2) objective misalignment, where maximum-likelihood or reinforcement-based training prioritizes coherence and helpfulness over factual accuracy; and (3) latent distribution shifts between pretraining and deployment-time inputs, particularly under long-tail or adversarial queries [952, 953]. These factors jointly reinforce spurious correlations and amplify unsupported generations, even in otherwise well-aligned models. 
In sum, hallucination represents a critical bottleneck for the reliable deployment of LLMs. Its mitigation is foundational not only for improving user trust but also for enabling the safe integration of LLMs into high-stakes decision-making + +workflows. + +Privacy. Data privacy concerns [954] represent another significant challenge in LLM deployment [821, 955]. Training these models requires vast amounts of text data, which may include personal information, corporate secrets, and medical records [956]. If an LLM inadvertently leaks sensitive training data or lacks robust access control mechanisms, users' private information could be exploited or misused. In corporate settings, LLMs may unintentionally expose confidential documents or sensitive customer data, leading to severe compliance and legal risks. Moreover, inference-time attacks [957], such as membership inference and model extraction, can further expose sensitive data by allowing adversaries to infer training set membership or replicate model behavior. Therefore, LLM-based applications must incorporate data protection measures and privacy-preserving techniques like differential privacy and query rate limiting to mitigate information leakage risks. + +Robustness. Prompt injection [543] and jailbreak [636] risks pose additional security threats. Attackers can craft adversarial prompts to bypass security restrictions, causing the model to generate harmful or unauthorized content. For example, in chatbot systems, malicious users could manip + +ulate LLMs to generate hate speech, disinformation, or even harmful instructions. Similarly, in AI-powered coding assistants such as GitHub Copilot, attackers may exploit LLMs to produce code with security vulnerabilities, potentially serving as backdoors for future cyberattacks. Developing robust security defenses to prevent LLMs from being misused in real-world applications is crucial for AI safety. + +Copyright. 
Another pressing concern is intellectual property and copyright protection [958, 959, 960]. LLMs are trained on vast datasets that often include copyrighted texts, source code, and artistic works, raising potential infringement risks. When generating content, LLMs may inadvertently replicate or closely mimic copyrighted material, leading to legal disputes. For instance, AI-powered writing tools might generate articles resembling published works, while coding assistants could produce open-source code snippets without proper licensing [961]. This not only raises concerns about content originality but also introduces legal and ethical dilemmas. Addressing these challenges requires watermarking [962, 963], provenance tracking, and clear copyright attribution mechanisms to ensure responsible AI-generated content management [178]. + +Ethical and Social Responsibility. Beyond technical concerns, ethical and social responsibility are also critical factors in large-scale LLM deployment. Due to biases in training data, LLMs may generate content that reinforces stereotypes, gender discrimination, or racial biases [964, 965]. In sectors such as hiring, finance, and healthcare, biased AI-generated recommendations could exacerbate existing inequalities and lead to unfair decision-making. Moreover, as LLMs become increasingly integrated into virtual assistants, social media, and news distribution platforms, concerns over AI-generated misinformation, transparency, and accountability are growing. Building fair, transparent, and trustworthy AI governance frameworks is thus essential to mitigating AI-induced social risks. + +Governance. As governments worldwide strengthen AI regulations, LLM-related legal and compliance requirements are evolving rapidly. The EU AI Act classifies LLMs as high-risk AI systems, requiring developers to provide transparency reports and risk control mechanisms [966]. 
China's Generative AI Regulations mandate AI-generated content to align with ethical standards and undergo governmental scrutiny [967]. In the United States, regulatory discussions emphasize AI transparency and data privacy protections, urging businesses to establish responsible AI practices [968]. These policy developments indicate that LLM-based applications must comply with regional regulations while maintaining a balance between compliance and innovation. + +In summary, while LLM-based applications drive technological progress, they also introduce multifaceted challenges related to misinformation, data privacy, adversarial manipulation, copyright infringement, ethical concerns, and regulatory compliance (refer to Figure 14). These issues not only impact the trustworthiness and legality of AI technologies but also have far-reaching implications for social trust, legal accountability, and business sustainability. Addressing these challenges necessitates a comprehensive approach that integrates privacy protection, content governance, copyright management, ethical safeguards, and regulatory compli + +ance, alongside collaborative efforts from both academia and industry. + +# 8 POTENTIAL RESEARCH DIRECTIONS + +Through a systematic and comprehensive examination of safety across the entire lifecycle of LLMs, we have identified valuable insights for future research: + +* Data generation holds immense potential, particularly in ensuring the safety of generated data and automating the data generation process, which is crucial for reliable and robust model training. Reliable data generation is fundamental to the integrity of model training. +$\star$ Post-training phases are becoming increasingly critical. Ensuring secure fine-tuning and alignment of data is a key future direction, closely intertwined with data generation. As concepts proliferate, multi-objective alignment may emerge as a significant area of focus. 
+$\star$ Model editing and unlearning safety are paramount for efficient model updates and deployment. Current learning efficiencies are suboptimal, and advancements in these technologies could revolutionize how models acquire new knowledge, enabling continuous and efficient learning (potentially even localized memory learning). These techniques might surpass traditional SGD algorithms, but safety measures are essential to prevent models from devolving into malicious entities that contradict human intentions. +$\star$ LLM agents, in the final deployment stage, require robust safety assurances. Ensuring the security of agent tools and agent memory, as well as addressing safety in embodied intelligence scenarios such as web agents and computer agents, are critical areas for further investigation. + +# 9 CONCLUSION + +In this survey, we provide a comprehensive analysis of the safety concerns across the entire lifecycle of LLMs, from data preparation and pre-training to post-training, deployment, and commercialization. By introducing the concept of "fullstack" safety, we offer an integrated view of the security and safety issues faced by LLMs throughout their development and usage, which addresses gaps in the existing literature that typically focus on specific stages of the lifecycle. + +Through an exhaustive review of over $900+$ papers, we systematically examined and organized the safety issues spanning key stages of LLM production, deployment, and use, including data generation, alignment techniques, model editing, and LLM-based agent systems and LLM-based applications. Our findings highlight the critical vulnerabilities at each stage, such as privacy risks, toxic data, harmful fine-tuning attacks, and deployment challenges. The safety of LLMs is a multifaceted issue requiring careful attention to data integrity, model alignment, and post-deployment security measures. 
Moreover, we propose promising directions for future research, including improvements in data safety, alignment techniques, and defense mechanisms for LLM-based agents. This work is vital for guiding future efforts to make LLMs safer and more reliable, especially as they become increasingly integral to various industries + +and applications. Ensuring robust security across the entire LLM lifecycle is crucial for their responsible and effective deployment in real-world scenarios. + +# REFERENCES + +[1] L. Ouyang, J. Wu, X. Jiang, D. Almeida, C. Wainwright, P. Mishkin, C. Zhang, S. Agarwal, K. Slama, A. Ray et al., "Training language models to follow instructions with human feedback," Advances in neural information processing systems, vol. 35, pp. 27730-27744, 2022. +[2] H. Touvron, T. Lavril, G. Izacard, X. Martinet, M.-A. Lachaux, T. Lacroix, B. Rozière, N. Goyal, E. Hambro, F. Azhar et al., "Llama: Open and efficient foundation language models," arXiv preprint arXiv:2302.13971, 2023. +[3] J. Bai, S. Bai, Y. Chu, Z. Cui, K. Dang, X. Deng, Y. Fan, W. Ge, Y. Han, F. Huang et al., "Qwen technical report," arXiv preprint arXiv:2309.16609, 2023. +[4] A. Liu, B. Feng, B. Xue, B. Wang, B. Wu, C. Lu, C. Zhao, C. Deng, C. Zhang, C. Ruan et al., "Deepseek-v3 technical report," arXiv preprint arXiv:2412.19437, 2024. +[5] D. Guo, D. Yang, H. Zhang, J. Song, R. Zhang, R. Xu, Q. Zhu, S. Ma, P. Wang, X. Bi et al., "Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning," arXiv preprint arXiv:2501.12948, 2025. +[6] W. X. Zhao, K. Zhou, J. Li, T. Tang, X. Wang, Y. Hou, Y. Min, B. Zhang, J. Zhang, Z. Dong et al., "A survey of large language models," arXiv preprint arXiv:2303.18223, vol. 1, no. 2, 2023. +[7] Y. Chang, X. Wang, J. Wang, Y. Wu, L. Yang, K. Zhu, H. Chen, X. Yi, C. Wang, Y. Wang et al., "A survey on evaluation of large language models," ACM transactions on intelligent systems and technology, vol. 15, no. 3, pp. 1-45, 2024. +[8] M. U. Hadi, R. 
Qureshi, A. Shah, M. Irfan, A. Zafar, M. B. Shaikh, N. Akhtar, J. Wu, S. Mirjalili et al., "A survey on large language models: Applications, challenges, limitations, and practical usage," Authorea Preprints, vol. 3, 2023. +[9] Y. Yan, S. Wang, J. Huo, J. Ye, Z. Chu, X. Hu, P. S. Yu, C. Gomes, B. Selman, and Q. Wen, "Position: Multimodal large language models can significantly advance scientific reasoning," arXiv preprint arXiv:2502.02871, 2025. +[10] Y. Yan, J. Su, J. He, F. Fu, X. Zheng, Y. Lyu, K. Wang, S. Wang, Q. Wen, and X. Hu, “A survey of mathematical reasoning in the era of multimodal large language model: Benchmark, method & challenges,” arXiv preprint arXiv:2412.11936, 2024. +[11] X. Zou, Y. Yan, X. Hao, Y. Hu, H. Wen, E. Liu, J. Zhang, Y. Li, T. Li, Y. Zheng et al., "Deep learning for cross-domain data fusion in urban computing: Taxonomy, advances, and outlook," Information Fusion, vol. 113, p. 102606, 2025. +[12] Y. Li, X. Zhang, L. Luo, H. Chang, Y. Ren, I. King, and J. Li, “G-refer: Graph retrieval-augmented large + +language model for explainable recommendation," arXiv preprint arXiv:2502.12586, 2025. +[13] S. Sun, R. Liu, J. Lyu, J.-W. Yang, L. Zhang, and X. Li, "A large language model-driven reward design framework via dynamic feedback for reinforcement learning," arXiv preprint arXiv:2410.14660, 2024. +[14] S. Sonko, A. O. Adewusi, O. C. Obi, S. Onwusinkwue, and A. Atadoga, “A critical review towards artificial general intelligence: Challenges, ethical considerations, and the path forward,” World Journal of Advanced Research and Reviews, vol. 21, no. 3, pp. 1262-1268, 2024. +[15] S. McLean, G. J. Read, J. Thompson, C. Baber, N. A. Stanton, and P. M. Salmon, "The risks associated with artificial general intelligence: A systematic review," Journal of Experimental & Theoretical Artificial Intelligence, vol. 35, no. 5, pp. 649-663, 2023. +[16] R. Liu, J. Gao, J. Zhao, K. Zhang, X. Li, B. Qi, W. Ouyang, and B. Zhou, "Can 1b llm surpass 405b llm? 
rethinking compute-optimal test-time scaling," arXiv preprint arXiv:2502.06703, 2025. +[17] J. Ruan, Y. Chen, B. Zhang, Z. Xu, T. Bao, H. Mao, Z. Li, X. Zeng, R. Zhao et al., "Tptu: Task planning and tool usage of large language model-based ai agents," in NeurIPS 2023 Foundation Models for Decision Making Workshop, 2023. +[18] V. Sorin, E. Klang, M. Sklair-Levy, I. Cohen, D. B. Zippel, N. Balint Lahat, E. Konen, and Y. Barash, "Large language model (chatgpt) as a support tool for breast tumor board," NPJ Breast Cancer, vol. 9, no. 1, p. 44, 2023. +[19] R. Yang, L. Song, Y. Li, S. Zhao, Y. Ge, X. Li, and Y. Shan, "Gpt4tools: Teaching large language model to use tools via self-instruction," Advances in Neural Information Processing Systems, vol. 36, pp. 71-995-72007, 2023. +[20] T. Schick, J. Dwivedi-Yu, R. Dessi, R. Raileanu, M. Lomeli, E. Hambro, L. Zettlemoyer, N. Cancedda, and T. Scialom, "Toolformer: Language models can teach themselves to use tools," Advances in Neural Information Processing Systems, vol. 36, pp. 68-59-68-551, 2023. +[21] W. Zhong, L. Guo, Q. Gao, H. Ye, and Y. Wang, "Memorybank: Enhancing large language models with long-term memory," in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 17, 2024, pp. 19724-19731. +[22] W. Wang, L. Dong, H. Cheng, X. Liu, X. Yan, J. Gao, and F. Wei, "Augmenting language models with long-term memory," Advances in Neural Information Processing Systems, vol. 36, pp. 74530-74543, 2023. +[23] Z. Zhang, X. Bo, C. Ma, R. Li, X. Chen, Q. Dai, J. Zhu, Z. Dong, and J.-R. Wen, "A survey on the memory mechanism of large language model based agents," arXiv preprint arXiv:2404.13501, 2024. +[24] J. Huo, Y. Yan, B. Hu, Y. Yue, and X. Hu, "Mmneuron: Discovering neuron-level domain-specific interpretation in multimodal large language model," arXiv preprint arXiv:2406.11193, 2024. +[25] W. Liu, X. Huang, X. Zeng, X. Hao, S. Yu, D. Li, S. Wang, W. Gan, Z. Liu, Y. 
Yu et al., "Toolace: Win + +ning the points of llm function calling," arXiv preprint arXiv:2409.00920, 2024. +[26] Q. Tang, Z. Deng, H. Lin, X. Han, Q. Liang, B. Cao, and L. Sun, "Toolalpaca: Generalized tool learning for language models with 3000 simulated cases," arXiv preprint arXiv:2306.05301, 2023. +[27] T. Guo, X. Chen, Y. Wang, R. Chang, S. Pei, N. V. Chawla, O. Wiest, and X. Zhang, "Large language model based multi-agents: A survey of progress and challenges," arXiv preprint arXiv:2402.01680, 2024. +[28] L. Wang, C. Ma, X. Feng, Z. Zhang, H. Yang, J. Zhang, Z. Chen, J. Tang, X. Chen, Y. Lin et al., "A survey on large language model based autonomous agents," Frontiers of Computer Science, vol. 18, no. 6, p. 186345, 2024. +[29] Z. Xi, W. Chen, X. Guo, W. He, Y. Ding, B. Hong, M. Zhang, J. Wang, S. Jin, E. Zhou et al., "The rise and potential of large language model based agents: A survey," Science China Information Sciences, vol. 68, no. 2, p. 121101, 2025. +[30] Y. Yan and J. Lee, "Georeasoner: Reasoning on geospatially grounded context for natural language understanding," in Proceedings of the 33rd ACM International Conference on Information and Knowledge Management, 2024, pp. 4163-4167. +[31] A. Majumdar, K. Yadav, S. Arnaud, J. Ma, C. Chen, S. Silwal, A. Jain, V-P. Berges, T. Wu, J. Vakil et al., "Where are we in the search for an artificial visual cortex for embodied intelligence?" Advances in Neural Information Processing Systems, vol. 36, pp. 655-677, 2023. +[32] M. Zhou, H. Dong, H. Song, N. Zheng, W.-H. Chen, and H. Wang, "Embodied intelligence-based perception, decision-making, and control for autonomous operations of rail transportation," IEEE Transactions on Intelligent Vehicles, 2024. +[33] X. Ma, Y. Gao, Y. Wang, R. Wang, X. Wang, Y. Sun, Y. Ding, H. Xu, Y. Chen, Y. Zhao et al., "Safety at scale: A comprehensive survey of large model safety," arXiv preprint arXiv:2502.05206, 2025. +[34] K. Kumar, T. Ashraf, O. Thawakar, R. M. Anwer, H. 
Cholakkal, M. Shah, M.-H. Yang, P. H. Torr, S. Khan, and F. S. Khan, "Llm post-training: A deep dive into reasoning large language models," arXiv preprint arXiv:2502.21321, 2025. +[35] Z.-Z. Li, D. Zhang, M.-L. Zhang, J. Zhang, Z. Liu, Y. Yao, H. Xu, J. Zheng, P.-J. Wang, X. Chen et al., "From system 1 to system 2: A survey of reasoning large language models," arXiv preprint arXiv:2502.17419, 2025. +[36] Y. Chen, W. Sun, C. Fang, Z. Chen, Y. Ge, T. Han, Q. Zhang, Y. Liu, Z. Chen, and B. Xu, "Security of language models for code: A systematic literature review," ACM Transactions on Software Engineering and Methodology, vol. 1, no. 1, pp. 1-66, 2025. +[37] W. Qu, Y. Zhou, Y. Wu, T. Xiao, B. Yuan, Y. Li, and J. Zhang, "Prompt inversion attack against collaborative inference of large language models," in IEEE S&P, 2025. +[38] J. Wu, S. Yang, R. Zhan, Y. Yuan, L. S. Chao, and D. F. Wong, "A survey on llm-generated text detection: Ne + +cessity, methods, and future directions," Computational Linguistics, pp. 1-66, 2025. +[39] H. Wang, J. Li, H. Wu, E. Hovy, and Y. Sun, "Pre-trained language models and their applications," *Engineering*, vol. 25, pp. 51-65, 2023. +[40] C. Zhou, Q. Li, C. Li, J. Yu, Y. Liu, G. Wang, K. Zhang, C. Ji, Q. Yan, L. He et al., "A comprehensive survey on pretrained foundation models: A history from bert to chatgpt," International Journal of Machine Learning and Cybernetics, pp. 1-65, 2024. +[41] X. Zhang, X. Zhu, and L. Lessard, "Online data poisoning attacks," in Learning for Dynamics and Control. PMLR, 2020, pp. 201-210. +[42] M. Goldblum, D. Tsipras, C. Xie, X. Chen, A. Schwarzschild, D. Song, A. Madry, B. Li, and T. Goldstein, "Dataset security for machine learning: Data poisoning, backdoor attacks, and defenses," IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 45, no. 2, pp. 1563-1580, 2022. +[43] N. Lukas, A. Salem, R. Sim, S. Tople, L. Wutschitz, and S. 
Zanella-Béguelin, "Analyzing leakage of personally identifiable information in language models," in 2023 IEEE Symposium on Security and Privacy (SP). IEEE, 2023, pp. 346-363. +[44] W. Sun, Y. Chen, C. Fang, Y. Feng, Y. Xiao, A. Guo, Q. Zhang, Y. Liu, B. Xu, and Z. Chen, "Eliminating backdoors in neural code models for secure code understanding," in Proceedings of the 33rd ACM International Conference on the Foundations of Software Engineering. Trondheim, Norway: ACM, Mon 23 - Fri 27 June 2025, pp. 1-23. +[45] H. R. Kirk, B. Vidgen, P. Röttger, and S. A. Hale, "The benefits, risks and bounds of personalizing the alignment of large language models to individuals," Nature Machine Intelligence, vol. 6, no. 4, pp. 383-392, 2024. +[46] Z. Zhou, H. Yu, X. Zhang, R. Xu, F. Huang, and Y. Li, "How alignment and jailbreak work: Explain llm safety through intermediate hidden states," in Findings of the Association for Computational Linguistics: EMNLP 2024, 2024, pp. 2461-2488. +[47] X. Qi, Y. Zeng, T. Xie, P.-Y. Chen, R. Jia, P. Mittal, and P. Henderson, "Fine-tuning aligned language models compromises safety, even when users do not intend to!" in ICLR, 2024. [Online]. Available: https://openreview.net/forum?id=hTEGyKf0dZ +[48] X. Qi, A. Panda, K. Lyu, X. Ma, S. Roy, A. Beirami, P. Mittal, and P. Henderson, "Safety alignment should be made more than just a few tokens deep," in The Thirteen International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=6Mxhg9PtDE +[49] D. Halawi, A. Wei, E. Wallace, T. T. Wang, N. Hagh-talab, and J. Steinhardt, "Covert malicious finetuning: Challenges in safeguarding LLM adaptation," in Proceedings of the 41st International Conference on Machine Learning. PMLR, 2024, pp. 17298-17312. +[50] W. Hawkins, B. Mittelstadt, and C. Russell, "The effect of fine-tuning on language model toxicity," in Neurips Safe Generative AI Workshop 2024, 2024. +[51] J. Huang and J. 
Zhang, "A survey on evaluation of + +multimodal large language models," arXiv preprint arXiv:2408.15769, 2024. +[52] P. Röttger, F. Pernisi, B. Vidgen, and D. Hovy, "Safetyprompts: a systematic review of open datasets for evaluating and improving large language model safety," arXiv preprint arXiv:2404.05399, 2024. +[53] Y. Dong, R. Mu, Y. Zhang, S. Sun, T. Zhang, C. Wu, G. Jin, Y. Qi, J. Hu, J. Meng et al., "Safeguarding large language models: A survey," arXiv preprint arXiv:2406.02622, 2024. +[54] Y. Wang, Y. Pan, Q. Zhao, Y. Deng, Z. Su, L. Du, and T. H. Luan, "Large model agents: State-of-the-art, cooperation paradigms, security and privacy, and future trends," arXiv preprint arXiv:2409.14457, 2024. +[55] G. Zhang, K. Chen, G. Wan, H. Chang, H. Cheng, K. Wang, S. Hu, and L. Bai, "Evoflow: Evolving diverse agentic workflows on the fly," arXiv preprint arXiv:2502.07373, 2025. +[56] G. Zhang, L. Niu, J. Fang, K. Wang, L. Bai, and X. Wang, "Multi-agent architecture search via agentic supernet," arXiv preprint arXiv:2502.04180, 2025. +[57] G. Zhang, Y. Yue, Z. Li, S. Yun, G. Wan, K. Wang, D. Cheng, J. X. Yu, and T. Chen, "Cut the crap: An economical communication pipeline for llm-based multi-agent systems," arXiv preprint arXiv:2410.02506, 2024. +[58] Y. Yue, G. Zhang, B. Liu, G. Wan, K. Wang, D. Cheng, and Y. Qi, "Masrouter: Learning to route llms for multi-agent systems," 2025. [Online]. Available: https://arxiv.org/abs/2502.11133 +[59] Z. Liang, Y. Xu, Y. Hong, P. Shang, Q. Wang, Q. Fu, and K. Liu, "A survey of multimodel large language models," in Proceedings of the 3rd International Conference on Computer, Artificial Intelligence and Control Engineering, 2024, pp. 405-409. +[60] S. Zhang, L. Dong, X. Li, S. Zhang, X. Sun, S. Wang, J. Li, R. Hu, T. Zhang, F. Wu et al., "Instruction tuning for large language models: A survey," arXiv preprint arXiv:2308.10792, 2023. +[61] H. Zhao, H. Chen, F. Yang, N. Liu, H. Deng, H. Cai, S. Wang, D. Yin, and M. 
Du, "Explainability for large language models: A survey," ACM Transactions on Intelligent Systems and Technology, vol. 15, no. 2, pp. 1-38, 2024. +[62] T. Shen, R. Jin, Y. Huang, C. Liu, W. Dong, Z. Guo, X. Wu, Y. Liu, and D. Xiong, "Large language model alignment: A survey," arXiv preprint arXiv:2309.15025, 2023. +[63] M. A. K. Raiaan, M. S. H. Mukta, K. Fatema, N. M. Fahad, S. Sakib, M. M. J. Mim, J. Ahmad, M. E. Ali, and S. Azam, "A review on large language models: Architectures, applications, taxonomies, open issues and challenges," IEEE access, vol. 12, pp. 26839-26874, 2024. +[64] K. S. Kalyan, "A survey of gpt-3 family large language models including chatgpt and gpt-4," Natural Language Processing Journal, vol. 6, p. 100048, 2024. +[65] E. Shayegani, M. A. A. Mamun, Y. Fu, P. Zaree, Y. Dong, and N. Abu-Ghazaleh, "Survey of vulnerabilities in large language models revealed by adversarial attacks," arXiv preprint arXiv:2310.10844, 2023. + +[66] Y. Yao, J. Duan, K. Xu, Y. Cai, Z. Sun, and Y. Zhang, "A survey on large language model (llm) security and privacy: The good, the bad, and the ugly," High-Confidence Computing, p. 100211, 2024. +[67] L. Qin, Q. Chen, Y. Zhou, Z. Chen, Y. Li, L. Liao, M. Li, W. Che, and P. S. Yu, "Multilingual large language model: A survey of resources, taxonomy and frontiers," arXiv preprint arXiv:2404.04925, 2024. +[68] M. U. Hadi, R. Qureshi, A. Shah, M. Irfan, A. Zafar, M. B. Shaikh, N. Akhtar, J. Wu, S. Mirjalili et al., "Large language models: a comprehensive survey of its applications, challenges, limitations, and future prospects," Authorea Preprints, vol. 1, pp. 1-26, 2023. +[69] L. Sun, Y. Huang, H. Wang, S. Wu, Q. Zhang, C. Gao, Y. Huang, W. Lyu, Y. Zhang, X. Li et al., "Trustllm: Trustworthiness in large language models," arXiv preprint arXiv:2401.05561, vol. 3, 2024. +[70] B. C. Das, M. H. Amini, and Y. Wu, "Security and privacy challenges of large language models: A survey," ACM Computing Surveys, vol. 57, no. 6, pp. 
1-39, 2025. +[71] F. He, T. Zhu, D. Ye, B. Liu, W. Zhou, and P. S. Yu, "The emerged security and privacy of llm agent: A survey with case studies," arXiv preprint arXiv:2407.19354, 2024. +[72] G. Tie, Z. Zhao, D. Song, F. Wei, R. Zhou, Y. Dai, W. Yin, Z. Yang, J. Yan, Y. Su et al., "A survey on post-training of large language models," arXiv preprint arXiv:2503.06072, 2025. +[73] Y. Huang, C. Gao, S. Wu, H. Wang, X. Wang, Y. Zhou, Y. Wang, J. Ye, J. Shi, Q. Zhang et al., "On the trustworthiness of generative foundation models: Guideline, assessment, and perspective," arXiv preprint arXiv:2502.14296, 2025. +[74] M. Yu, F. Meng, X. Zhou, S. Wang, J. Mao, L. Pang, T. Chen, K. Wang, X. Li, Y. Zhang et al., "A survey on trustworthy llm agents: Threats and countermeasures," arXiv preprint arXiv:2503.09648, 2025. +[75] X. Ma, Y. Gao, Y. Wang, R. Wang, X. Wang, Y. Sun, Y. Ding, H. Xu, Y. Chen, Y. Zhao, H. Huang, Y. Li, J. Zhang, X. Zheng, Y. Bai, Z. Wu, X. Qiu, J. Zhang, Y. Li, J. Sun, C. Wang, J. Gu, B. Wu, S. Chen, T. Zhang, Y. Liu, M. Gong, T. Liu, S. Pan, C. Xie, T. Pang, Y. Dong, R. Jia, Y. Zhang, S. Ma, X. Zhang, N. Gong, C. Xiao, S. Erfani, B. Li, M. Sugiyama, D. Tao, J. Bailey, and Y.-G. Jiang, "Safety at scale: A comprehensive survey of large model safety," 2025. [Online]. Available: https://arxiv.org/abs/2502.05206 +[76] Y. Huang, L. Sun, H. Wang, S. Wu, Q. Zhang, Y. Li, C. Gao, Y. Huang, W. Lyu, Y. Zhang et al., "Position: Trustllm: Trustworthiness in large language models," in International Conference on Machine Learning. PMLR, 2024, pp. 20166-20270. +[77] Z. Dong, Z. Zhou, C. Yang, J. Shao, and Y. Qiao, "Attacks, defenses and evaluations for llm conversation safety: A survey," arXiv preprint arXiv:2402.09283, 2024. +[78] G. Penedo, Q. Malartic, D. Hesslow, R. Cojocaru, A. Cappelli, H. Alobeidli, B. Pannier, E. Almazrouei, and J. 
Launay, "The refined web dataset for falcon llm: outperforming curated corpora with web data, and web data only," arXiv preprint arXiv:2306.01116, 2023. + +[79] L. Soldaini, R. Kinney, A. Bhagia, D. Schwenk, D. Atkinson, R. Authur, B. Bogin, K. Chandu, J. Dumas, Y. Elazar et al., "Dolma: An open corpus of three trillion tokens for language model pretraining research," arXiv preprint arXiv:2402.00159, 2024. +[80] J. Kaddour, J. Harris, M. Mozes, H. Bradley, R. Raileanu, and R. McHardy, "Challenges and applications of large language models," arXiv preprint arXiv:2307.10169, 2023. +[81] W. Sun, Y. Chen, G. Tao, C. Fang, X. Zhang, Q. Zhang, and B. Luo, "Backdooring neural code search," in Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics. Toronto, Canada: Association for Computational Linguistics, July 9-14 2023, pp. 9692-9708. +[82] W. Sun, Y. Chen, M. Yuan, C. Fan, Z. Chen, C. Wang, Y. Liu, B. Xu, and Z. Chen, "Show me your code! kill code poisoning: A lightweight method based on code naturalness," in Proceedings of the IEEE/ACM 47th International Conference on Software Engineering. Ottawa, Ontario, Canada: IEEE Computer Society, Sun 27 April - Sat 3 May 2025, pp. 1-13. +[83] N. Carlini, M. Jagielski, C. A. Choquette-Choo, D. Paleka, W. Pearce, H. Anderson, A. Terzis, K. Thomas, and F. Tramèr, "Poisoning web-scale training datasets is practical," in 2024 IEEE Symposium on Security and Privacy (SP). IEEE, 2024, pp. 407-425. +[84] Y. Zhang, J. Rando, I. Evtimov, J. Chi, E. M. Smith, N. Carlini, F. Tramér, and D. Ippolito, "Persistent pre-training poisoning of llms," arXiv preprint arXiv:2410.13722, 2024. +[85] E. Wallace, T. Z. Zhao, S. Feng, and S. Singh, "Concealed data poisoning attacks on nlp models," arXiv preprint arXiv:2010.12563, 2020. +[86] B. Yan, K. Li, M. Xu, Y. Dong, Y. Zhang, Z. Ren, and X. Cheng, "On protecting the data privacy of large language models (llms): A survey," arXiv preprint arXiv:2403.05156, 2024. 
+[87] N. Kandpal, E. Wallace, and C. Raffel, "Deduplicating training data mitigates privacy risks in language models," in International Conference on Machine Learning. PMLR, 2022, pp. 10697-10707. +[88] N. Carlini, D. Ippolito, M. Jagielski, K. Lee, F. Tramer, and C. Zhang, “Quantifying memorization across neural language models,” in The Eleventh International Conference on Learning Representations, 2022. +[89] C. Arnett, E. Jones, I. P. Yamshchikov, and P.-C. Langlais, "Toxicity of the commons: Curating open-source pre-training data," arXiv preprint arXiv:2410.22587, 2024. +[90] K. Lee, D. Ippolito, A. Nystrom, C. Zhang, D. Eck, C. Callison-Burch, and N. Carlini, “Deduplicating training data makes language models better,” arXiv preprint arXiv:2107.06499, 2021. +[91] Y. Li, Y. Jiang, Z. Li, and S. Xia, "Backdoor learning: A survey." IEEE Transactions on Neural Networks and Learning Systems, vol. 35, no. 1, pp. 5-22, 2024. +[92] Y. Zeng, M. Pan, H. Jahagirdar, M. Jin, L. Lyu, and R. Jia, "How to sift out a clean data subset in the presence of data poisoning?" arXiv preprint arXiv:2210.06516, 2022. + +[93] M. Pan, Y. Zeng, L. Lyu, X. Lin, and R. Jia, “{ASSET}: Robust backdoor data detection across a multiplicity of deep learning paradigms,” in 32nd USENIX Security Symposium (USENIX Security 23), 2023, pp. 2725–2742. +[94] Z. Zhang, L. Lyu, W. Wang, L. Sun, and X. Sun, "How to inject backdoors with better consistency: Logit anchoring on clean data," in International Conference on Learning Representations, 2022. +[95] Z. Zhang, L. Lyu, X. Ma, C. Wang, and X. Sun, "Fine-mixing: Mitigating backdoors in fine-tuned language models," arXiv preprint arXiv:2210.09545, 2022. +[96] X. Sun, X. Li, Y. Meng, X. Ao, L. Lyu, J. Li, and T. Zhang, "Defending against backdoor attacks in natural language generation," in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 37, no. 4, 2023, pp. 5257-5265. +[97] S. Longpre, G. Yauney, E. Reif, K. Lee, A. Roberts, B. Zoph, D. 
Zhou, J. Wei, K. Robinson, D. Mimno et al., "A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity," in Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), 2024, pp. 3245-3276. +[98] S. Neel and P. Chang, "Privacy issues in large language models: A survey," arXiv preprint arXiv:2312.06717, 2023. +[99] X. Wu, R. Duan, and J. Ni, "Unveiling security, privacy, and ethical concerns of chatgpt," Journal of Information and Intelligence, vol. 2, no. 2, pp. 102-115, 2024. +[100] M. Gupta, C. Akiri, K. Aryal, E. Parker, and L. Praharaj, "From chatgpt to threatgpt: Impact of generative ai in cybersecurity and privacy," IEEE Access, vol. 11, pp. 80218-80245, 2023. +[101] M. Miranda, E. S. Ruzzetti, A. Santilli, F. M. Zanzotto, S. Bratières, and E. Rodolà, “Preserving privacy in large language models: A survey on current threats and solutions,” arXiv preprint arXiv:2408.05212, 2024. +[102] Q. Zhang, H. Qiu, D. Wang, Y. Li, T. Zhang, W. Zhu, H. Weng, L. Yan, and C. Zhang, “A benchmark for semantic sensitive information in llms outputs,” in The Thirteenth International Conference on Learning Representations, 2025. +[103] S. Kim, S. Yun, H. Lee, M. Gubri, S. Yoon, and S. J. Oh, "Propile: C," Advances in Neural Information Processing Systems, vol. 36, pp. 20750-20762, 2023. +[104] H. Li, D. Guo, W. Fan, M. Xu, J. Huang, F. Meng, and Y. Song, "Multi-step jailbreaking privacy attacks on chatgpt," arXiv preprint arXiv:2304.05197, 2023. +[105] M. S. Ozdayi, C. Peris, J. FitzGerald, C. Dupuy, J. Majmudar, H. Khan, R. Parikh, and R. Gupta, "Controlling the extraction of memorized data from large language models via prompt-tuning," arXiv preprint arXiv:2305.11759, 2023. +[106] N. Carlini, C. Liu, U. Erlingsson, J. Kos, and D. 
Song, "The secret sharer: Evaluating and testing unintended memorization in neural networks," in 28th USENIX security symposium (USENIX security 19), 2019, pp. 267-284. +[107] M. Nasr, N. Carlini, J. Hayase, M. Jagielski, A. F. Cooper, D. Ippolito, C. A. Choquette-Choo, E. Wallace, + +F. Tramér, and K. Lee, "Scalable extraction of training data from (production) language models," arXiv preprint arXiv:2311.17035, 2023. +[108] N. Carlini, F. Tramer, E. Wallace, M. Jagielski, A. Herbert-Voss, K. Lee, A. Roberts, T. Brown, D. Song, U. Erlingsson et al., "Extracting training data from large language models," in 30th USENIX security symposium (USENIX Security 21), 2021, pp. 2633-2650. +[109] Y. Bai, G. Pei, J. Gu, Y. Yang, and X. Ma, "Special characters attack: Toward scalable training data extraction from large language models," arXiv preprint arXiv:2405.05990, 2024. +[110] Z. Zhou, J. Xiang, C. Chen, and S. Su, “Quantifying and analyzing entity-level memorization in large language models,” in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 17, 2024, pp. 19741-19749. +[111] X. Yang, Z. Wen, W. Qu, Z. Chen, Z. Xiang, B. Chen, and H. Yao, “Memorization and privacy risks in domain-specific large language models,” in ICLR 2024 Workshop on Reliable and Responsible Foundation Models, 2024. +[112] R. Shokri, M. Stronati, C. Song, and V. Shmatikov, "Membership inference attacks against machine learning models," in 2017 IEEE symposium on security and privacy (SP). IEEE, 2017, pp. 3-18. +[113] H. Hu, Z. Salcic, L. Sun, G. Dobbie, P. S. Yu, and X. Zhang, "Membership inference attacks on machine learning: A survey," ACM Computing Surveys (CSUR), vol. 54, no. 11s, pp. 1-37, 2022. +[114] N. Carlini, S. Chien, M. Nasr, S. Song, A. Terzis, and F. Tramer, "Membership inference attacks from first principles," in 2022 IEEE symposium on security and privacy (SP). IEEE, 2022, pp. 1897-1914. +[115] J. Ye, A. Maddi, S. K. Murakonda, V. Bindschaedler, and R. 
Shokri, "Enhanced membership inference attacks against machine learning models," in Proceedings of the 2022 ACM SIGSAC Conference on Computer and Communications Security, 2022, pp. 3093-3106. +[116] J. Zhang, D. Das, G. Kamath, and F. Tramère, "Membership inference attacks cannot prove that a model was trained on your data," arXiv preprint arXiv:2409.19798, 2024. +[117] M. Duan, A. Suri, N. Mireshghallah, S. Min, W. Shi, L. Zettlemoyer, Y. Tsvetkov, Y. Choi, D. Evans, and H. Hajishirzi, "Do membership inference attacks work on large language models?" arXiv preprint arXiv:2402.07841, 2024. +[118] M. Meeus, I. Shilov, S. Jain, M. Faysse, M. Rei, and Y.-A. de Montjoye, "Sok: Membership inference attacks on llms are rushing nowhere (and how to fix it)," arXiv preprint arXiv:2406.17975, 2024. +[119] Y. He, B. Li, Y. Wang, M. Yang, J. Wang, H. Hu, and X. Zhao, "Is difficulty calibration all we need? towards more practical membership inference attacks," in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 1226-1240. +[120] Y. He, B. Li, L. Liu, Z. Ba, W. Dong, Y. Li, Z. Qin, K. Ren, and C. Chen, "Towards label-only membership inference attack against pre-trained large lan + +guage models," in USENIX Security, 2025. +[121] J. Ren, K. Chen, C. Chen, V. Sehwag, Y. Xing, J. Tang, and L. Lyu, "Self-comparison for dataset-level membership inference in large (vision-) language model," in Proceedings of the ACM on Web Conference 2025, 2025, pp. 910-920. +[122] A. Albalak, Y. Elazar, S. M. Xie, S. Longpre, N. Lambert, X. Wang, N. Muennighoff, B. Hou, L. Pan, H. Jeong et al., "A survey on data selection for language models," arXiv preprint arXiv:2402.16827, 2024. +[123] P. Maini, S. Goyal, D. Sam, A. Robey, Y. Savani, Y. Jiang, A. Zou, Z. C. Lipton, and J. Z. Kolter, "Safety pretraining: Toward the next generation of safe ai," arXiv preprint arXiv:2504.16980, 2025. +[124] A. Hurst, A. Lerer, A. P. Goucher, A. Perelman, A. Ramesh, A. 
Clark, A. Ostrow, A. Welihinda, A. Hayes, A. Radford et al., "Gpt-4o system card," arXiv preprint arXiv:2410.21276, 2024. +[125] S. Li, F. Liu, L. Cui, J. Lu, Q. Xiao, X. Yang, P. Liu, K. Sun, Z. Ma, and X. Wang, "Safe planner: Empowering safety awareness in large pre-trained models for robot task planning," in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 39, no. 14, 2025, pp. 14619-14627. +[126] J. O'Neill, S. Subramanian, E. Lin, A. Satish, and V. Mugunthan, "Guardformer: Guardrail instruction pretraining for efficient safeguarding," in Neurips Safe Generative AI Workshop 2024. +[127] T. Huang, S. Hu, F. Ilhan, S. F. Tekin, and L. Liu, "Harmful fine-tuning attacks and defenses for large language models: A survey," arXiv preprint arXiv:2409.18169, 2024. +[128] M. Shu, J. Wang, C. Zhu, J. Geiping, C. Xiao, and T. Goldstein, "On the exploitability of instruction tuning," Advances in Neural Information Processing Systems, vol. 36, pp. 61-836-61-856, 2023. +[129] J. Xu, M. D. Ma, F. Wang, C. Xiao, and M. Chen, "Instructions as backdoors: Backdoor vulnerabilities of instruction tuning for large language models," arXiv preprint arXiv:2305.14710, 2023. +[130] J. Yan, V. Yadav, S. Li, L. Chen, Z. Tang, H. Wang, V. Srinivasan, X. Ren, and H. Jin, "Backdooring instruction-tuned large language models with virtual prompt injection," arXiv preprint arXiv:2307.16888, 2023. +[131] H. Yao, J. Lou, and Z. Qin, "Poisonprompt: Backdoor attack on prompt-based large language models," in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 7745-7749. +[132] S. Zhao, J. Wen, L. A. Tuan, J. Zhao, and J. Fu, "Prompt as triggers for backdoor attack: Examining the vulnerability in language models," arXiv preprint arXiv:2305.01219, 2023. +[133] Z. Han, C. Gao, J. Liu, J. Zhang, and S. Q. Zhang, "Parameter-efficient fine-tuning for large models: A comprehensive survey," arXiv preprint arXiv:2403.14608, 2024. 
+[134] L. Xu, H. Xie, S.-Z. J. Qin, X. Tao, and F. L. Wang, "Parameter-efficient fine-tuning methods for pretrained language models: A critical review and + +assessment," arXiv preprint arXiv:2312.12148, 2023. +[135] N. Ding, Y. Qin, G. Yang, F. Wei, Z. Yang, Y. Su, S. Hu, Y. Chen, C.-M. Chan, W. Chen et al., "Parameter-efficient fine-tuning of large-scale pre-trained language models," Nature Machine Intelligence, vol. 5, no. 3, pp. 220-235, 2023. +[136] S. Zhao, L. Gan, L. A. Tuan, J. Fu, L. Lyu, M. Jia, and J. Wen, "Defending against weight-poisoning backdoor attacks for parameter-efficient fine-tuning," arXiv preprint arXiv:2402.12168, 2024. +[137] J. Kim, M. Song, S. H. Na, and S. Shin, "Obliviate: Neutralizing task-agnostic backdoors within the parameter-efficient fine-tuning paradigm," arXiv preprint arXiv:2409.14119, 2024. +[138] S. Jiang, S. R. Kadhe, Y. Zhou, F. Ahmed, L. Cai, and N. Baracaldo, "Turning generative models degenerate: The power of data poisoning attacks," arXiv preprint arXiv:2407.12281, 2024. +[139] T. Li, A. K. Sahu, A. Talwalkar, and V. Smith, "Federated learning: Challenges, methods, and future directions," IEEE signal processing magazine, vol. 37, no. 3, pp. 50-60, 2020. +[140] C. Zhang, Y. Xie, H. Bai, B. Yu, W. Li, and Y. Gao, "A survey on federated learning," Knowledge-Based Systems, vol. 216, p. 106775, 2021. +[141] L. Li, Y. Fan, M. Tse, and K.-Y. Lin, "A review of applications in federated learning," Computers & Industrial Engineering, vol. 149, p. 106854, 2020. +[142] Z. Wang, Z. Shen, Y. He, G. Sun, H. Wang, L. Lyu, and A. Li, "Flora: Federated fine-tuning large language models with heterogeneous low-rank adaptations," arXiv preprint arXiv:2409.05976, 2024. +[143] C. Chen, X. Feng, Y. Li, L. Lyu, J. Zhou, X. Zheng, and J. Yin, "Integration of large language models and federated learning," *Patterns*, vol. 5, no. 12, 2024. +[144] W. Zhuang, C. Chen, and L. 
Lyu, "When foundation model meets federated learning: Motivations, challenges, and future directions," arXiv preprint arXiv:2306.15546, 2023. +[145] G. Sun, Y. Cong, J. Dong, Q. Wang, L. Lyu, and J. Liu, "Data poisoning attacks on federated machine learning," IEEE Internet of Things Journal, vol. 9, no. 13, pp. 11365-11375, 2021. +[146] L. Lyu, H. Yu, X. Ma, C. Chen, L. Sun, J. Zhao, Q. Yang, and P. S. Yu, "Privacy and robustness in federated learning: Attacks and defenses," IEEE transactions on neural networks and learning systems, vol. 35, no. 7, pp. 8726-8746, 2022. +[147] R. Ye, J. Chai, X. Liu, Y. Yang, Y. Wang, and S. Chen, "Emerging safety attack and defense in federated instruction tuning of large language models," arXiv preprint arXiv:2406.10630, 2024. +[148] Z. Zhang, A. Panda, L. Song, Y. Yang, M. Mahoney, P. Mittal, R. Kannan, and J. Gonzalez, "Neurotoxin: Durable backdoors in federated learning," in International Conference on Machine Learning. PMLR, 2022, pp. 26429-26446. +[149] T. Fu, M. Sharma, P. Torr, S. B. Cohen, D. Krueger, and F. Berez, “Poisonbench: Assessing large language model vulnerability to data poisoning,” arXiv preprint arXiv:2410.08811, 2024. + +[150] P. Pathmanathan, S. Chakraborty, X. Liu, Y. Liang, and F. Huang, "Is poisoning a real threat to llm alignment? maybe more so than you think," arXiv preprint arXiv:2406.12091, 2024. +[151] A. Wan, E. Wallace, S. Shen, and D. Klein, “Poisoning language models during instruction tuning,” in International Conference on Machine Learning. PMLR, 2023, pp. 35413-35425. +[152] J. Rando and F. Tramer, "Universal jailbreak backdoors from poisoned human feedback," arXiv preprint arXiv:2311.14455, 2023. +[153] T. Baumgartner, Y. Gao, D. Alon, and D. Metzler, "Best-of-venom: Attacking rlhf by injecting poisoned preference data," arXiv preprint arXiv:2404.05530, 2024. +[154] B. Chen, H. Guo, G. Wang, Y. Wang, and Q. 
Yan, "The dark side of human feedback: Poisoning large language models via user inputs," arXiv preprint arXiv:2409.00787, 2024. +[155] Y. Bai, A. Jones, K. Ndousse, A. Askell, A. Chen, N. DasSarma, D. Drain, S. Fort, D. Ganguli, T. Henighan et al., "Training a helpful and harmless assistant with reinforcement learning from human feedback," arXiv preprint arXiv:2204.05862, 2022. +[156] H. Dong, W. Xiong, B. Pang, H. Wang, H. Zhao, Y. Zhou, N. Jiang, D. Sahoo, C. Xiong, and T. Zhang, "Rlhf workflow: From reward modeling to online rlhf," arXiv preprint arXiv:2405.07863, 2024. +[157] W. Xiong, H. Dong, C. Ye, Z. Wang, H. Zhong, H. Ji, N. Jiang, and T. Zhang, "Iterative preference learning from human feedback: Bridging theory and practice for rlhf under kl-constraint," arXiv preprint arXiv:2312.11456, 2023. +[158] H. Lee, S. Phatale, H. Mansoor, K. R. Lu, T. Mesnard, J. Ferret, C. Bishop, E. Hall, V. Carbune, and A. Rastogi, "Rlaif: Scaling reinforcement learning from human feedback with ai feedback," 2023. +[159] R. Rafailov, A. Sharma, E. Mitchell, C. D. Manning, S. Ermon, and C. Finn, "Direct preference optimization: Your language model is secretly a reward model," Advances in Neural Information Processing Systems, vol. 36, pp. 53728-53741, 2023. +[160] J. Wang, J. Wu, M. Chen, Y. Vorobeychik, and C. Xiao, "Rlhfpoison: Reward poisoning attack for reinforcement learning with human feedback in large language models," arXiv preprint arXiv:2311.09641, 2023. +[161] S. Gunasekar, Y. Zhang, J. Aneja, C. C. T. Mendes, A. Del Giorno, S. Gopi, M. Javaheripi, P. Kauffmann, G. de Rosa, O. Saarikivi et al., "Textbooks are all you need," arXiv preprint arXiv:2306.11644, 2023. +[162] Y. Li, S. Bubeck, R. Eldan, A. Del Giorno, S. Gunasekar, and Y. T. Lee, "Textbooks are all you need ii: phi-1.5 technical report," arXiv preprint arXiv:2309.05463, 2023. +[163] J. Zhan, J. Dai, J. Ye, Y. Zhou, D. Zhang, Z. Liu, X. Zhang, R. Yuan, G. Zhang, L. 
Li et al., "Anygpt: Unified multimodal llm with discrete sequence modeling," arXiv preprint arXiv:2402.12226, 2024. +[164] H. Wang, C. Liu, N. Xi, Z. Qiang, S. Zhao, B. Qin, and T. Liu, "Huatuo: Tuning llama model with chinese medical knowledge," arXiv preprint arXiv:2304.06975, 2023. + +[165] P. Sutanto, J. Santoso, E. I. Setiawan, and A. P. Wibawa, "Llm distillation for efficient few-shot multiple choice question answering," arXiv preprint arXiv:2412.09807, 2024. +[166] X. Zhu, J. Li, Y. Liu, C. Ma, and W. Wang, "Distilling mathematical reasoning capabilities into small language models," Neural Networks, vol. 179, p. 106594, 2024. +[167] R. Xu, H. Cui, Y. Yu, X. Kan, W. Shi, Y. Zhuang, W. Jin, J. Ho, and C. Yang, "Knowledge-infused prompting: Assessing and advancing clinical text data generation with large language models," arXiv preprint arXiv:2311.00287, 2023. +[168] N. Crispino, K. Montgomery, F. Zeng, D. Song, and C. Wang, "Agent instructs large language models to be general zero-shot reasoners," arXiv preprint arXiv:2310.03710, 2023. +[169] C. Li, C. Zhang, Y. Lu, J. Zhang, Q. Sun, X. Wang, J. Wei, G. Wang, Y. Yang, and H. T. Shen, "Syzygy of thoughts: Improving llm cot with the minimal free resolution," arXiv preprint arXiv:2504.09566, 2025. +[170] Z. Chen, K. Liu, Q. Wang, W. Zhang, J. Liu, D. Lin, K. Chen, and F. Zhao, "Agent-flan: Designing data and methods of effective agent tuning for large language models," arXiv preprint arXiv:2403.12881, 2024. +[171] C. Xu, Q. Sun, K. Zheng, X. Geng, P. Zhao, J. Feng, C. Tao, and D. Jiang, "Wizardlm: Empowering large language models to follow complex instructions," arXiv preprint arXiv:2304.12244, 2023. +[172] S. Mukherjee, A. Mitra, G. Jawahar, S. Agarwal, H. Palangi, and A. Awadallah, "Orca: Progressive learning from complex explanation traces of gpt-4," arXiv preprint arXiv:2306.02707, 2023. +[173] Y. Wang, Y. Kordi, S. Mishra, A. Liu, N. A. Smith, D. Khashabi, and H. 
Hajishirzi, "Self-instruct: Aligning language models with self-generated instructions," arXiv preprint arXiv:2212.10560, 2022. +[174] R. Ri, S. Kiyono, and S. Takase, "Self-translatabrain: Enhancing cross-lingual transfer of large language models via inherent capability," arXiv preprint arXiv:2407.00454, 2024. +[175] J. Ji, M. Liu, J. Dai, X. Pan, C. Zhang, C. Bian, B. Chen, R. Sun, Y. Wang, and Y. Yang, "Beavertails: Towards improved safety alignment of llm via a human-preference dataset," Advances in Neural Information Processing Systems, vol. 36, pp. 24678-24704, 2023. +[176] H. Lightman, V. Kosaraju, Y. Burda, H. Edwards, B. Baker, T. Lee, J. Leike, J. Schulman, I. Sutskever, and K. Cobbe, "Let's verify step by step," in The Twelfth International Conference on Learning Representations, 2023. +[177] R. Nakano, J. Hilton, S. Balaji, J. Wu, L. Ouyang, C. Kim, C. Hesse, S. Jain, V. Kosaraju, W. Saunders et al., "Webgpt: Browser-assisted question-answering with human feedback," arXiv preprint arXiv:2112.09332, 2021. +[178] C. Chen, J. Fu, and L. Lyu, "A pathway towards responsible ai generated content," arXiv preprint arXiv:2303.01325, 2023. +[179] A. Akkus, M. P. Aghdam, M. Li, J. Chu, M. Backes, Y. Zhang, and S. Sav, "Generated data with fake privacy: Hidden dangers of fine-tuning large lan + +guage models on generated data," arXiv preprint arXiv:2409.11423, 2024. +[180] Y. Song, J. Zhang, Z. Tian, Y. Yang, M. Huang, and D. Li, "Llm-based privacy data augmentation guided by knowledge distillation with a distribution tutor for medical text classification," arXiv preprint arXiv:2402.16515, 2024. +[181] A. Kang, J. Y. Chen, Z. Lee-Youngzie, and S. Fu, "Synthetic data generation with llm for improved depression prediction," arXiv preprint arXiv:2411.17672, 2024. +[182] A. Taubenfeld, Y. Dover, R. Reichart, and A. Goldstein, "Systematic biases in llm simulations of debates," arXiv preprint arXiv:2402.04049, 2024. +[183] A. Mishra, G. Nayak, S. Bhattacharya, T. 
Kumar, A. Shah, and M. Foltin, "Llm-guided counterfactual data generation for fairer ai," in Companion Proceedings of the ACM Web Conference 2024, 2024, pp. 1538-1545. +[184] Y. Yu, Y. Zhuang, J. Zhang, Y. Meng, A. J. Ratner, R. Krishna, J. Shen, and C. Zhang, "Large language model as attributed training data generator: A tale of diversity and bias," Advances in Neural Information Processing Systems, vol. 36, pp. 55734-55784, 2023. +[185] A. Borah and R. Mihalcea, "Towards implicit bias detection and mitigation in multi-agent lvm interactions," arXiv preprint arXiv:2410.02584, 2024. +[186] X. Dong, Y. Wang, P. S. Yu, and J. Caverlee, "Disclosure and mitigation of gender bias in llms," arXiv preprint arXiv:2402.11190, 2024. +[187] I. M. Serouis and F. Sèdes, “Exploring large language models for bias mitigation and fairness,” in 1st International Workshop on AI Governance (AIGOV) in conjunction with the Thirty-Third International Joint Conference on Artificial Intelligence, 2024. +[188] Y. Chen, Q. Fu, Y. Yuan, Z. Wen, G. Fan, D. Liu, D. Zhang, Z. Li, and Y. Xiao, "Hallucination detection: Robustly discerning reliable answers in large language models," in Proceedings of the 32nd ACM International Conference on Information and Knowledge Management, 2023, pp. 245-255. +[189] N. Chakraborty, M. Ornik, and K. Driggs-Campbell, "Hallucination detection in foundation models for decision-making: A flexible definition and review of the state of the art," ACM Computing Surveys, 2025. +[190] E. Entezami and A. Naseh, "Llm misalignment via adversarial rlhf platforms," arXiv preprint arXiv:2503.03039, 2025. +[191] J. Achiam, S. Adler, S. Agarwal, L. Ahmad, I. Akkaya, F. L. Aleman, D. Almeida, J. Altenschmidt, S. Altman, S. Anadkat et al., "Gpt-4 technical report," arXiv preprint arXiv:2303.08774, 2023. +[192] A. Young, B. Chen, C. Li, C. Huang, G. Zhang, G. Zhang, G. Wang, H. Li, J. Zhu, J. Chen et al., "Yi: Open foundation models by 01. 
ai," arXiv preprint arXiv:2403.04652, 2024. +[193] A. Dubey, A. Jauhri, A. Pandey, A. Kadian, A. Al-Dahle, A. Letman, A. Mathur, A. Schelten, A. Yang, A. Fan et al., "The llama 3 herd of models," arXiv preprint arXiv:2407.21783, 2024. +[194] Z. Cai, M. Cao, H. Chen, K. Chen, K. Chen, X. Chen, X. Chen, Z. Chen, Z. Chen, P. Chu et al., "Internlm2 + +technical report," arXiv preprint arXiv:2403.17297, 2024. +[195] R. Anil, A. M. Dai, O. Firat, M. Johnson, D. Lepikhin, A. Passos, S. Shakeri, E. Taropa, P. Bailey, Z. Chen et al., "Palm 2 technical report," arXiv preprint arXiv:2305.10403, 2023. +[196] T. GLM, A. Zeng, B. Xu, B. Wang, C. Zhang, D. Yin, D. Zhang, D. Rojas, G. Feng, H. Zhao et al., "Chatglm: A family of large language models from glm-130b to glm-4 all tools," arXiv preprint arXiv:2406.12793, 2024. +[197] G. Team, R. Anil, S. Borgeaud, J.-B. Alayrac, J. Yu, R. Soricut, J. Schalkwyk, A. M. Dai, A. Hauth, K. Millican et al., "Gemini: a family of highly capable multimodal models," arXiv preprint arXiv:2312.11805, 2023. +[198] G. Team, T. Mesnard, C. Hardin, R. Dadashi, S. Bhupatiraju, S. Pathak, L. Sifre, M. Rivière, M. S. Kale, J. Love et al., "Gemma: Open models based on gemini research and technology," arXiv preprint arXiv:2403.08295, 2024. +[199] D. Groeneveld, I. Beltagy, P. Walsh, A. Bhagia, R. Kinney, O. Tafjord, A. H. Jha, H. Ivison, I. Magnusson, Y. Wang et al., "Olmo: Accelerating the science of language models," arXiv preprint arXiv:2402.00838, 2024. +[200] B. Adler, N. Agarwal, A. Aithal, D. H. Anh, P. Bhattacharya, A. Brundyn, J. Casper, B. Catanzaro, S. Clay, J. Cohen et al., "Nemotron-4 340b technical report," arXiv preprint arXiv:2406.11704, 2024. +[201] A. Jaech, A. Kalai, A. Lerer, A. Richardson, A. El-Kishky, A. Low, A. Helyar, A. Madry, A. Beutel, A. Carney et al., "Openai o1 system card," arXiv preprint arXiv:2412.16720, 2024. 
+[202] OpenAI, "Gpt-4o mini: advancing cost-efficient intelligence," 2024, https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence. +[203] A. Yang, B. Xiao, B. Wang, B. Zhang, C. Bian, C. Yin, C. Lv, D. Pan, D. Wang, D. Yan et al., "Baichuan 2: Open large-scale language models," arXiv preprint arXiv:2309.10305, 2023. +[204] J. Welbl, A. Glaese, J. Uesato, S. Dathathri, J. Mellor, L. A. Hendricks, K. Anderson, P. Kohli, B. Coppin, and P.-S. Huang, "Challenges in detoxifying language models," in Findings of the Association for Computational Linguistics: EMNLP 2021, 2021, pp. 2447-2469. +[205] H. Ngo, C. Raterink, J. G. Araújo, I. Zhang, C. Chen, A. Morisot, and N. Frosst, "Mitigating harm in language models with conditional-likelihood filtration," arXiv preprint arXiv:2108.07790, 2021. +[206] Y. Chen, W. Cai, L. Wu, X. Li, Z. Xin, and C. Fu, "Tigerbot: An open multilingual multitask llm," arXiv preprint arXiv:2312.08688, 2023. +[207] S. Prabhumoye, M. Patwary, M. Shoeybi, and B. Catanzaro, "Adding instructions during pretraining: Effective way of controlling toxicity in language models," in Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics, 2023, pp. 2636-2651. +[208] Y. Ge, W. Sun, Y. Lou, C. Fang, Y. Zhang, Y. Li, X. Zhang, Y. Liu, Z. Zhao, and Z. Chen, "Demonstration attack against in-context learning for code intelligence," CoRR, vol. abs/2410.02841, no. 1, pp. 1-17, 2024. + +[209] G. Team, P. Georgiev, V. I. Lei, R. Burnell, L. Bai, A. Gulati, G. Tanzer, D. Vincent, Z. Pan, S. Wang et al., "Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context," arXiv preprint arXiv:2403.05530, 2024. +[210] J. Parmar, S. Prabhumoye, J. Jennings, M. Patwary, S. Subramanian, D. Su, C. Zhu, D. Narayanan, A. Jhunjunwala, A. Dattagupta et al., "Nemotron-4 15b technical report," arXiv preprint arXiv:2402.16819, 2024. +[211] C. Raffel, N. Shazeer, A. Roberts, K. Lee, S. Narang, M. 
Matena, Y. Zhou, W. Li, and P. J. Liu, "Exploring the limits of transfer learning with a unified text-to-text transformer," Journal of machine learning research, vol. 21, no. 140, pp. 1-67, 2020. +[212] T. Markov, C. Zhang, S. Agarwal, F. E. Nekoul, T. Lee, S. Adler, A. Jiang, and L. Weng, “A holistic approach to undesired content detection in the real world,” in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 37, no. 12, 2023, pp. 15009-15018. +[213] A. Dubey, A. Jauhri, A. Pandey, A. Kadian, A. Al-Dahle, A. Letman, A. Mathur, A. Schelten, A. Yang, A. Fan et al., "The llama 3 herd of models," arXiv preprint arXiv:2407.21783, 2024. +[214] T. Huang, S. Hu, F. Ilhan, S. F. Tekin, and L. Liu, "Harmful fine-tuning attacks and defenses for large language models: A survey," arXiv preprint arXiv:2409.18169, 2024. +[215] J. Wu, Y. Xie, Z. Yang, J. Wu, J. Chen, J. Gao, B. Ding, X. Wang, and X. He, "Towards robust alignment of language models: Distributionally robustifying direct preference optimization," arXiv preprint arXiv:2407.07880, 2024. +[216] Z. Xu, S. Vemuri, K. Panaganti, D. Kalathil, R. Jain, and D. Ramachandran, "Distributionally robust direct preference optimization," arXiv preprint arXiv:2502.01930, 2025. +[217] J. Dai, X. Pan, R. Sun, J. Ji, X. Xu, M. Liu, Y. Wang, and Y. Yang, "Safe rlhf: Safe reinforcement learning from human feedback," in The Twelfth International Conference on Learning Representations, 2023. +[218] C. O. Retzlaff, S. Das, C. Wayllace, P. Mousavi, M. Afshari, T. Yang, A. Saranti, A. Angerschmid, M. E. Taylor, and A. Holzinger, "Human-in-the-loop reinforcement learning: A survey and position on requirements, challenges, and opportunities," Journal of Artificial Intelligence Research, vol. 79, pp. 359-415, 2024. +[219] S. Milani, N. Topin, M. Veloso, and F. Fang, "Explainable reinforcement learning: A survey and comparative review," ACM Computing Surveys, vol. 56, no. 7, pp. 1-36, 2024. +[220] A. Ahmadian, C. Cremer, M. 
Gallé, M. Fadaee, J. Kreutzer, O. Pietquin, A. Üstün, and S. Hooker, "Back to basics: Revisiting reinforce style optimization for learning from human feedback in llms," arXiv preprint arXiv:2402.14740, 2024. +[221] T. Liu, Z. Qin, J. Wu, J. Shen, M. Khalman, R. Joshi, Y. Zhao, M. Saleh, S. Baumgartner, J. Liu et al., "Lipo: Listwise preference optimization through learning-to-rank," arXiv preprint arXiv:2402.01878, 2024. +[222] F. Song, B. Yu, M. Li, H. Yu, F. Huang, Y. Li, and + +H. Wang, "Preference ranking optimization for human alignment," in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 17, 2024, pp. 18990-18998. +[223] Z. Wang, B. Bi, S. K. Pentyala, K. Ramnath, S. Chaudhuri, S. Mehrotra, X.-B. Mao, S. Asur et al., "A comprehensive survey of llm alignment techniques: Rlhf, rlaif, ppo, dpo and more," arXiv preprint arXiv:2407.16216, 2024. +[224] T. Huang, S. Hu, F. Ilhan, S. F. Tekin, and L. Liu, "Lisa: Lazy safety alignment for large language models against harmful fine-tuning attack," in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=RPChapuXIC +[225] T. Huang, S. Hu, and L. Liu, "Vaccine: Perturbation-aware alignment for large language models against harmful fine-tuning attack," in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=lpXDZKiAnt +[226] J. Wang, J. Li, Y. Li, X. Qi, J. Hu, Y. Li, P. McDaniel, M. Chen, B. Li, and C. Xiao, "Backdooralign: Mitigating fine-tuning based jailbreak attack with backdoor enhanced safety alignment," in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=1PcjJ5Evta7 +[227] F. Bianchi, M. Suzgun, G. Attanasio, P. Rottger, D. Jurafsky, T. Hashimoto, and J. 
Zou, "Safety-tuned LLaMAs: Lessons from improving the safety of large language models that follow instructions," in The Twelfth International Conference on Learning Representations, 2024. [Online]. Available: https://openreview.net/forum?id=gT5hALch9z +[228] H. Shen, P.-Y. Chen, P. Das, and T. Chen, "SEAL: Safety-enhanced aligned LLM fine-tuning via bilevel data selection," in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=VHguhvcoM5 +[229] R. Tang, J. Yuan, Y. Li, Z. Liu, R. Chen, and X. Hu, "Setting the trap: Capturing and defeating backdoor threats in plms through honeypots," NeurIPS, 2023. +[230] C.-Y. Hsu, Y.-L. Tsai, C.-H. Lin, P.-Y. Chen, C.-M. Yu, and C.-Y. Huang, "Safe loRA: The silver lining of reducing safety risks when finetuning large language models," in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=HcifdQZFV +[231] R. Hazra, S. Layek, S. Banerjee, and S. Poria, "Safety arithmetic: A framework for test-time safety alignment of language models by steering parameters and activations," in Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, 2024, pp. 21759-21776. +[232] Y. Du, S. Zhao, D. Zhao, M. Ma, Y. Chen, L. Huo, Q. Yang, D. Xu, and B. Qin, "MoGU: A framework for enhancing safety of LLMs while preserving their usability," in The Thirty- + +eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=SrFbgIjb53 +[233] X. Yi, S. Zheng, L. Wang, G. de Melo, X. Wang, and L. He, "Nlsr: Neuron-level safety realignment of large language models against harmful fine-tuning," arXiv preprint arXiv:2412.12497, 2024. +[234] D. Shi, T. Shen, Y. Huang, Z. Li, Y. Leng, R. Jin, C. Liu, X. Wu, Z. Guo, L. 
Yu et al., "Large language model safety: A holistic survey," arXiv preprint arXiv:2412.17686, 2024. +[235] B. Ni, Z. Liu, L. Wang, Y. Lei, Y. Zhao, X. Cheng, Q. Zeng, L. Dong, Y. Xia, K. Kenthapadi et al., "Towards trustworthy retrieval augmented generation for large language models: A survey," arXiv preprint arXiv:2502.06872, 2025. +[236] F. Barez, T. Fu, A. Prabhu, S. Casper, A. Sanyal, A. Bibi, A. O'Gara, R. Kirk, B. Bucknall, T. Fist, L. Ong, P. Torr, K. Lam, R. Trager, D. Krueger, S. Mindermann, J. Hernández-Orallo, M. Geva, and Y. Gal, "Open problems in machine unlearning for AI safety," CoRR, 2025. +[237] U. Anwar, A. Saparov, J. Rando, D. Paleka, M. Turpin, P. Hase, E. S. Lubana, E. Jenner, S. Casper, O. Sourbut et al., “Foundational challenges in assuring alignment and safety of large language models,” arXiv preprint arXiv:2404.09932, 2024. +[238] X. Qi, Y. Zeng, T. Xie, P.-Y. Chen, R. Jia, P. Mittal, and P. Henderson, "Fine-tuning aligned language models compromises safety, even when users do not intend to!" arXiv preprint arXiv:2310.03693, 2023. +[239] X. Yang, X. Wang, Q. Zhang, L. Petzold, W. Y. Wang, X. Zhao, and D. Lin, "Shadow alignment: The ease of subverting safely-aligned language models," arXiv preprint arXiv:2310.02949, 2023. +[240] Q. Zhan, R. Fang, R. Bindu, A. Gupta, T. Hashimoto, and D. Kang, "Removing rlhf protections in gpt-4 via fine-tuning," arXiv preprint arXiv:2311.05553, 2023. +[241] J. Kazdan, L. Yu, R. Schaeffer, C. Cundy, S. Koyejo, and D. Krishnamurthy, "No, of course i can! refusal mechanisms can be exploited using harmless finetuning data," arXiv preprint arXiv:2502.19537, 2025. +[242] D. Halawi, A. Wei, E. Wallace, T. T. Wang, N. Haghtalab, and J. Steinhardt, "Covert malicious finetuning: Challenges in safeguarding llm adaptation," arXiv preprint arXiv:2406.20053, 2024. +[243] T. Huang, S. Hu, F. Ilhan, S. F. Tekin, and L. 
Liu, "Virus: Harmful fine-tuning attack for large language models bypassing guardrail moderation," arXiv preprint arXiv:2501.17433, 2025. +[244] Y. Qiang, X. Zhou, S. Z. Zade, M. A. Roshani, P. Khanduri, D. Zytko, and D. Zhu, "Learning to poison large language models during instruction tuning," arXiv preprint arXiv:2402.13459, 2024. +[245] J. Raghuram, G. Kesidis, and D. J. Miller, "A study of backdoors in instruction fine-tuned language models," arXiv preprint arXiv:2406.07778, 2024. +[246] J. Yi, R. Ye, Q. Chen, B. Zhu, S. Chen, D. Lian, G. Sun, X. Xie, and F. Wu, "On the vulnerability of safety alignment in open-access llms," in Findings of the Association for Computational Linguistics ACL 2024, + +2024, pp. 9236-9260. +[247] S. Lermen, C. Rogers-Smith, and J. Ladish, "Lora finetuning efficiently undoes safety training in llama 2-chat 70b," arXiv preprint arXiv:2310.20624, 2023. +[248] L. Piercing, "Lora-as-an-attack! piercing llm safety under the share-and-play scenario." +[249] S. Poppi, Z.-X. Yong, Y. He, B. Chern, H. Zhao, A. Yang, and J. Chi, "Towards understanding the fragility of multilingual llms against fine-tuning attacks," arXiv preprint arXiv:2410.18210, 2024. +[250] S. Li, E. C.-H. Ngai, F. Ye, and T. Voigt, "Peft-as-an-attack! jailbreaking language models during federated parameter-efficient fine-tuning," arXiv preprint arXiv:2411.19335, 2024. +[251] N. Razin, S. Malladi, A. Bhaskar, D. Chen, S. Arora, and B. Hanin, "Unintentional unalignment: Likelihood displacement in direct preference optimization," arXiv preprint arXiv:2410.08847, 2024. +[252] R. Xu, Y. Cai, Z. Zhou, R. Gu, H. Weng, Y. Liu, T. Zhang, W. Xu, and H. Qiu, "Course-correction: Safety alignment using synthetic preferences," arXiv preprint arXiv:2407.16637, 2024. +[253] J. Ji, B. Chen, H. Lou, D. Hong, B. Zhang, X. Pan, T. A. Qiu, J. Dai, and Y. Yang, "Aligner: Efficient alignment by learning to correct," Advances in Neural Information Processing Systems, vol. 37, pp. 
90853-90890, 2024. +[254] D. Ganguli, L. Lovitt, J. Kernion, A. Askell, Y. Bai, S. Kadavath, B. Mann, E. Perez, N. Schiefer, K. Ndousse et al., "Red teaming language models to reduce harms: Methods, scaling behaviors, and lessons learned," arXiv preprint arXiv:2209.07858, 2022. +[255] T. Xiao, Y. Yuan, H. Zhu, M. Li, and V. G. Honavar, "Cal-DPO: Calibrated direct preference optimization for language model alignment," in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=57OQXxbTbY +[256] S. Guo, B. Zhang, T. Liu, T. Liu, M. Khalman, F. Llinares, A. Rame, T. Mesnard, Y. Zhao, B. Piot et al., "Direct language model alignment from online ai feedback," arXiv preprint arXiv:2402.04792, 2024. +[257] Z. Liu, X. Sun, and Z. Zheng, "Enhancing llm safety via constrained direct preference optimization," arXiv preprint arXiv:2403.02475, 2024. +[258] H. Lee, S. Phatale, H. Mansoor, T. Mesnard, J. Ferret, K. R. Lu, C. Bishop, E. Hall, V. Carbune, A. Rastogi, and S. Prakash, "RLAIF vs. RLHF: Scaling reinforcement learning from human feedback with AI feedback," in *Forty-first International Conference on Machine Learning*, 2024. [Online]. Available: https://openreview.net/forum?id=uydQ2W41KO +[259] X. Lu, B. Yu, Y. Lu, H. Lin, H. Yu, L. Sun, X. Han, and Y. Li, "Sofa: Shielded on-the-fly alignment via priority rule following," in Findings of the Association for Computational Linguistics ACL 2024, 2024, pp. 7108-7136. +[260] A. Zou, Z. Wang, N. Carlini, M. Nasr, J. Z. Kolter, and M. Fredrikson, "Universal and transferable adversarial attacks on aligned language models," arXiv preprint arXiv:2307.15043, 2023. +[261] P. Chao, A. Robey, E. Dobriban, H. Hassani, G. J. + +Pappas, and E. Wong, "Jailbreaking black box large language models in twenty queries," arXiv preprint arXiv:2310.08419, 2023. +[262] Z. Zhou, J. Xiang, H. Chen, Q. Liu, Z. Li, and S. 
Su, "Speak out of turn: Safety vulnerability of large language models in multi-turn dialogue," arXiv preprint arXiv:2402.17262, 2024. +[263] Q. Ren, H. Li, D. Liu, Z. Xie, X. Lu, Y. Qiao, L. Sha, J. Yan, L. Ma, and J. Shao, "Derail yourself: Multi-turn llm jailbreak attack through self-discovered clues," arXiv preprint arXiv:2410.10700, 2024. +[264] X. Pang, S. Tang, R. Ye, Y. Xiong, B. Zhang, Y. Wang, and S. Chen, "Self-alignment of large language models via monopolylogue-based social scene simulation," in Proceedings of the 41st International Conference on Machine Learning, 2024, pp. 39-46. +[265] J. Ji, D. Hong, B. Zhang, B. Chen, J. Dai, B. Zheng, T. Qiu, B. Li, and Y. Yang, "Pku-saferlhf: Towards multi-level safety alignment for llms with human preference," arXiv preprint arXiv:2406.15513, 2024. +[266] T. Mu, A. Helyar, J. Heidecke, J. Achiam, A. Vallone, I. D. Kivlichan, M. Lin, A. Beutel, J. Schulman, and L. Weng, "Rule based rewards for language model safety," in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. +[267] X. Tan, S. Shi, X. Qiu, C. Qu, Z. Qi, Y. Xu, and Y. Qi, "Self-criticism: Aligning large language models with their understanding of helpfulness, honesty, and harmlessness," in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Industry Track, M. Wang and I. Zitouni, Eds. Singapore: Association for Computational Linguistics, Dec. 2023, pp. 650-662. [Online]. Available: https://aclanthology.org/2023.emnlp-industry.62/ +[268] M. Y. Guan, M. Joglekar, E. Wallace, S. Jain, B. Barak, A. Heylar, R. Dias, A. Vallone, H. Ren, J. Wei et al., "Deliberative alignment: Reasoning enables safer language models," arXiv preprint arXiv:2412.16339, 2024. +[269] B. Wei, K. Huang, Y. Huang, T. Xie, X. Qi, M. Xia, P. Mittal, M. Wang, and P. 
Henderson, "Assessing the brittleness of safety alignment via pruning and low-rank modifications," in *Forty-first International Conference on Machine Learning*, 2024. [Online]. Available: https://openreview.net/forum?id=K6xxnKN2gm +[270] A. Arditi, O. B. Obeso, A. Syed, D. Paleka, N. Rimsky, W. Gurnee, and N. Nanda, "Refusal in language models is mediated by a single direction," in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=pH3XAQME6c +[271] R. Ye, J. Chai, X. Liu, Y. Yang, Y. Wang, and S. Chen, "Emerging safety attack and defense in federated instruction tuning of large language models," in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=sYNWqQYJhz +[272] J. Mukhoti, Y. Gal, P. Torr, and P. K. Dokania, "Finetuning can cripple foundation models; preserving features may be the solution," 2024. [Online]. Available: https://openreview.net/forum?id=VQ7Q6qdp0P + +[273] Y. Du, S. Zhao, J. Cao, M. Ma, D. Zhao, F. FAN, T. Liu, and B. Qin, "Towards secure tuning: Mitigating security risks arising from benign instruction fine-tuning," 2024. [Online]. Available: https://openreview.net/forum?id=Egd7Vi1EuA +[274] J. Li and J.-E. Kim, "Safety alignment shouldn't be complicated," 2025. [Online]. Available: https://openreview.net/forum?id=9H91juqfgb +[275] S. Li, L. Yao, L. Zhang, and Y. Li, "Safety layers in aligned large language models: The key to LLM security," in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=kUH1yPMAn7 +[276] Z. Zhou, H. Yu, X. Zhang, R. Xu, F. Huang, K. Wang, Y. Liu, J. Fang, and Y. Li, "On the role of attention heads in large language model safety," in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=h0Ak8A5yqw +[277] M. Li, W. 
M. Si, M. Backes, Y. Zhang, and Y. Wang, "SaloRA: Safety-alignment preserved low-rank adaptation," in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=GOoVzE9nSj +[278] Y. Zong, O. Bohdal, T. Yu, Y. Yang, and T. Hospedales, "Safety fine-tuning at (almost) no cost: A baseline for vision large language models," in *Forty-first International Conference on Machine Learning*, 2024. [Online]. Available: https://openreview.net/forum?id=bWZKvF0g7G +[279] F. Eiras, A. Petrov, P. Torr, M. P. Kumar, and A. Bibi, "Do as i do (safely): Mitigating task-specific fine-tuning risks in large language models," in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=IXE5lB6ppV +[280] J. Luo, X. Luo, K. Ding, J. Yuan, Z. Xiao, and M. Zhang, "Robustft: Robust supervised fine-tuning for large language models under noisy response," 2024. [Online]. Available: https://arxiv.org/abs/2412.14922 +[281] K. Lyu, H. Zhao, X. Gu, D. Yu, A. Goyal, and S. Arora, "Keeping LLMs aligned after finetuning: The crucial role of prompt templates," in ICLR 2024 Workshop on Reliable and Responsible Foundation Models, 2024. [Online]. Available: https://openreview.net/forum?id=XlnpQOn95Z +[282] P. Hacker, A. Engel, and M. Mauer, "Regulating chatgpt and other large generative ai models," in Proceedings of the 2023 ACM Conference on Fairness, Accountability, and Transparency. Association for Computing Machinery, 2023. +[283] M. Kolla, S. Salunkhe, E. Chandrasekharan, and K. Saha, "Llm-mod: Can large language models assist content moderation?" in Extended Abstracts of the CHI Conference on Human Factors in Computing Systems. Association for Computing Machinery, 2024. +[284] D. Kumar, Y. A. AbuHashem, and Z. 
Durumeric, "Watch your language: Investigating content moderation with large language models," Proceedings of the International AAAI Conference on Web and Social Media, 2024. + +[285] H. K. Choi, X. Du, and Y. Li, "Safety-aware finetuning of large language models," in Neurips Safe Generative AI Workshop 2024, 2024. [Online]. Available: https://openreview.net/forum?id=SqL94fLSM7 +[286] H. Ge, Y. Li, Q. Wang, Y. Zhang, and R. Tang, "When backdoors speak: Understanding llm backdoor attacks through model-generated explanations," arXiv preprint arXiv:2411.12701, 2024. +[287] B. Yi, T. Huang, S. Chen, T. Li, Z. Liu, Z. Chu, and Y. Li, "Probe before you talk: Towards black-box defense against backdoor unalignment for large language models," in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=EbxYDBhE3S +[288] B. Tran, J. Li, and A. Madry, "Spectral signatures in backdoor attacks," in Advances in Neural Information Processing Systems. Curran Associates, Inc., 2018. +[289] S. Casper, L. Schulze, O. Patel, and D. Hadfield-Menell, "Defending against unforeseen failure modes with latent adversarial training," 2024. [Online]. Available: https://arxiv.org/abs/2403.05030 +[290] T. Huang, G. Bhattacharya, P. Joshi, J. Kimball, and L. Liu, "Antidote: Post-fine-tuning safety alignment for large language models against harmful finetuning," 2024. [Online]. Available: https://arxiv.org/abs/2408.09600 +[291] J. Li, "Detecting instruction fine-tuning attack on language models with influence function," arXiv preprint arXiv:2504.09026, 2025. +[292] X. Yi, S. Zheng, L. Wang, X. Wang, and L. He, "A safety realignment framework via subspace-oriented model fusion for large language models," Knowledge-Based Systems, 2024. +[293] M. Zhu, Y. Weng, L. Yang, Y. Wei, N. Zhang, and Y. Zhang, "Locking down the finetuned LLMs safety," 2025. [Online]. Available: https://openreview.net/forum?id=YGoFl5KKFc +[294] D. Wu, X. 
Lu, Y. Zhao, and B. Qin, "Separate the wheat from the chaff: A post-hoc approach to safety re-alignment for fine-tuned language models," 2025. [Online]. Available: https://arxiv.org/abs/2412.11041 +[295] Y. Wang, T. Huang, L. Shen, H. Yao, H. Luo, R. Liu, N. Tan, J. Huang, and D. Tao, "Panacea: Mitigating harmful fine-tuning for large language models via post-fine-tuning perturbation," 2025. [Online]. Available: https://arxiv.org/abs/2501.18100 +[296] Q. Liu, C. Shang, L. Liu, N. Pappas, J. Ma, N. A. John, S. Doss, L. Marquez, M. Ballesteros, and Y. Benajiba, "Unraveling and mitigating safety alignment degradation of vision-language models," 2025. [Online]. Available: https://openreview.net/forum?id=EEWpE9cR27 +[297] S. Xu, L. Pang, Y. Zhu, H. Shen, and X. Cheng, "Cross-modal safety mechanism transfer in large vision-language models," arXiv preprint arXiv:2410.12662, 2024. +[298] S. Li, L. Yao, L. Zhang, and Y. Li, "Safety layers in aligned large language models: The key to llm security," arXiv preprint arXiv:2408.17003, 2024. +[299] W. Zhao, Z. Li, Y. Li, Y. Zhang, and J. Sun, "Defending large language models against jailbreak + +attacks via layer-specific editing," 2024. [Online]. Available: https://arxiv.org/abs/2405.18166 +[300] NIST, "Artificial intelligence risk management framework: Generative artificial intelligence profile (initial public draft)," 2024, accessed: 2025-05-29. [Online]. Available: https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.800-1.ipd.pdf +[301] X. Qi, B. Wei, N. Carlini, Y. Huang, T. Xie, L. He, M. Jagielski, M. Nasr, P. Mittal, and P. Henderson, "On Evaluating the Durability of Safeguards for Open-Weight LLMs," Dec. 2024. +[302] D. Rosati, J. Wehner, K. Williams, L. Bartoszcze, R. Gonzales, C. Maple, S. Majumdar, H. Sajjad, and F. Rudzicz, "Representation Noising: A Defence Mechanism Against Harmful Finetuning," in The Thirty-eighth Annual Conference on Neural Information Processing Systems, Nov. 2024. +[303] R. Tamirisa, B. Bharathi, L. 
Phan, A. Zhou, A. Gatti, T. Suresh, M. Lin, J. Wang, R. Wang, R. Arel, A. Zou, D. Song, B. Li, D. Hendrycks, and M. Mazeika, "Tamper-Resistant Safeguards for Open-Weight LLMs," Feb. 2025. +[304] D. Rosati, J. Wehner, K. Williams, L. Bartoszcze, H. Sajjad, and F. Rudzicz, "Immunization against harmful fine-tuning attacks," in Findings of the Association for Computational Linguistics: EMNLP 2024. Association for Computational Linguistics, 2024. +[305] M. Mazeika, L. Phan, X. Yin, A. Zou, Z. Wang, N. Mu, E. Sakhaee, N. Li, S. Basart, B. Li et al., "Harmbench: A standardized evaluation framework for automated red teaming and robust refusal," arXiv preprint arXiv:2402.04249, 2024. +[306] P. Chao, E. Debenedetti, A. Robey, M. Andriushchenko, F. Croce, V. Sehwag, E. Dobriban, N. Flammarion, G. J. Pappas, F. Tramer et al., "Jailbreakbench: An open robustness benchmark for jailbreaking large language models," arXiv preprint arXiv:2404.01318, 2024. +[307] S. Liu, S. Cui, H. Bu, Y. Shang, and X. Zhang, "Jail-bench: A comprehensive chinese security assessment benchmark for large language models," arXiv preprint arXiv:2502.18935, 2025. +[308] J. Cui, W.-L. Chiang, I. Stoica, and C.-J. Hsieh, "Or-bench: An over-refusal benchmark for large language models," arXiv preprint arXiv:2405.20947, 2024. +[309] T. Xie, X. Qi, Y. Zeng, Y. Huang, U. M. Sehwag, K. Huang, L. He, B. Wei, D. Li, Y. Sheng et al., "Sorry-bench: Systematically evaluating large language model safety refusal behaviors," arXiv preprint arXiv:2406.14598, 2024. +[310] L. Zheng, W.-L. Chiang, Y. Sheng, S. Zhuang, Z. Wu, Y. Zhuang, Z. Lin, Z. Li, D. Li, E. Xing et al., "Judging llm-as-a-judge with mt-bench and chatbot arena," Advances in Neural Information Processing Systems, vol. 36, pp. 46595-46623, 2023. +[311] Z. Wang, S. Hu, S. Zhao, X. Lin, F. Juefei-Xu, Z. Li, L. Han, H. Subramanyam, L. Chen, J. Chen et al., "Mllm-as-a-judge for image safety without human labeling," arXiv preprint arXiv:2501.00192, 2024. +[312] D. 
Rosati, J. Wehner, K. Williams, L. Bartoszcze, D. Atanasov, R. Gonzales, S. Majumdar, C. Maple, + +H. Sajjad, and F. Rudzicz, "Representation noising effectively prevents harmful fine-tuning on llms," arXiv e-prints, pp. arXiv-2405, 2024. +[313] H. Zhang, J. Huang, K. Mei, Y. Yao, Z. Wang, C. Zhan, H. Wang, and Y. Zhang, "Agent security bench (ASB): Formalizing and benchmarking attacks and defenses in LLM-based agents," in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=V4y0CpX4hK +[314] T. Yuan, Z. He, L. Dong, Y. Wang, R. Zhao, T. Xia, L. Xu, B. Zhou, F. Li, Z. Zhang et al., "R-judge: Benchmarking safety risk awareness for llm agents," arXiv preprint arXiv:2401.10019, 2024. +[315] Z. Zhang, L. Lei, L. Wu, R. Sun, Y. Huang, C. Long, X. Liu, X. Lei, J. Tang, and M. Huang, "Safetybench: Evaluating the safety of large language models," arXiv preprint arXiv:2309.07045, 2023. +[316] L. Li, B. Dong, R. Wang, X. Hu, W. Zuo, D. Lin, Y. Qiao, and J. Shao, "Salad-bench: A hierarchical and comprehensive safety benchmark for large language models," arXiv preprint arXiv:2402.05044, 2024. +[317] K. Cobbe, V. Kosaraju, M. Bavarian, M. Chen, H. Jun, L. Kaiser, M. Plappert, J. Tworek, J. Hilton, R. Nakano et al., "Training verifiers to solve math word problems," arXiv preprint arXiv:2110.14168, 2021. +[318] S.-Y. Miao, C.-C. Liang, and K.-Y. Su, "A diverse corpus for evaluating and developing english math word problem solvers," arXiv preprint arXiv:2106.15772, 2021. +[319] E. Glazer, E. Erdil, T. Besiroglu, D. Chicharro, E. Chen, A. Gunning, C. F. Olsson, J.-S. Denain, A. Ho, E. d. O. Santos et al., "Frontiermath: A benchmark for evaluating advanced mathematical reasoning in ai," arXiv preprint arXiv:2411.04872, 2024. +[320] M. Chen, J. Tworek, H. Jun, Q. Yuan, H. P. D. O. Pinto, J. Kaplan, H. Edwards, Y. Burda, N. Joseph, G. 
Brockman et al., "Evaluating large language models trained on code," arXiv preprint arXiv:2107.03374, 2021. +[321] C. E. Jimenez, J. Yang, A. Wettig, S. Yao, K. Pei, O. Press, and K. Narasimhan, "Swe-bench: Can language models resolve real-world github issues?" arXiv preprint arXiv:2310.06770, 2023. +[322] X. Zhang, J. Zhao, and Y. LeCun, "Character-level convolutional networks for text classification," Advances in neural information processing systems, vol. 28, 2015. +[323] H. Luo, Y. Jin, X. Liu, T. Shang, R. Chen, and Z. Liu, "Geic: Universal and multilingual named entity recognition with large language models," arXiv preprint arXiv:2409.11022, 2024. +[324] X. Li, T. Zhang, Y. Dubois, R. Taori, I. Gulrajani, C. Guestrin, P. Liang, and T. B. Hashimoto, "Alpaca-eval: An automatic evaluator of instruction-following models," 2023. +[325] W.-L. Chiang, L. Zheng, Y. Sheng, A. N. Angelopoulos, T. Li, D. Li, B. Zhu, H. Zhang, M. Jordan, J. E. Gonzalez et al., "Chatbot arena: An open platform for evaluating llms by human preference," in *Forty-first International Conference on Machine Learning*, 2024. +[326] B. Gliwa, I. Mochol, M. Biesek, and A. Wawer, "Samsum corpus: A human-annotated dialogue + +dataset for abstractive summarization," arXiv preprint arXiv:1911.12237, 2019. +[327] M. Macháček and O. Bojar, "Results of the wmt14 metrics shared task," in Proceedings of the Ninth Workshop on Statistical Machine Translation, 2014, pp. 293-301. +[328] X. Lu, D. Liu, Y. Yu, L. Xu, and J. Shao, "X-boundary: Establishing exact safety boundary to shield llms from multi-turn jailbreaks without compromising usability," arXiv preprint arXiv:2502.09990, 2025. +[329] OpenAI, "Moderation api," https://platform.openai.com/docs/guides/moderation/overview, 2023. +[330] H. Inan, K. Upasani, J. Chi, R. Rungta, K. Iyer, Y. Mao, M. Tontchev, Q. Hu, B. Fuller, D. Testuggine, and M. Khabsa, "Llama guard: Llm-based input-output safeguard for human-ai conversations," CoRR, 2023. +[331] J. 
Ji, T. Qiu, B. Chen, B. Zhang, H. Lou, K. Wang, Y. Duan, Z. He, J. Zhou, Z. Zhang et al., "Ai alignment: A comprehensive survey," arXiv preprint arXiv:2310.19852, 2023. +[332] T. A. Qiu, Y. Zhang, X. Huang, J. Li, J. Ji, and Y. Yang, "Progressgym: Alignment with a millennium of moral progress," Advances in Neural Information Processing Systems, vol. 37, pp. 14570-14607, 2024. +[333] B. Wang, W. Chen, H. Pei, C. Xie, M. Kang, C. Zhang, C. Xu, Z. Xiong, R. Dutta, R. Schaeffer et al., "Decoding trust: A comprehensive assessment of trustworthiness in gpt models." in NeurIPS, 2023. +[334] S. Gehman, S. Gururangan, M. Sap, Y. Choi, and N. A. Smith, "Realtoxicityprompts: Evaluating neural toxic degeneration in language models," arXiv preprint arXiv:2009.11462, 2020. +[335] Y. Wang, H. Li, X. Han, P. Nakov, and T. Baldwin, "Do-not-answer: A dataset for evaluating safeguards in llms," arXiv preprint arXiv:2308.13387, 2023. +[336] M. Conover, R. Staats, A. Rane, G. Shani, K. Katz, A. Powell, A. Ross, A. Maas, and A. Zhang, "Databricks-dolly: Introducing dolly-15k, democratizing the magic of instruction following," https://github.com/databrickslabs/dolly, 2023. +[337] X. Wu, Y. Hao, K. Sun, Y. Chen, F. Zhu, R. Zhao, and H. Li, "Human preference score v2: A solid benchmark for evaluating human preferences of text-to-image synthesis," arXiv preprint arXiv:2306.09341, 2023. +[338] Y. Yan, S. Wang, J. Huo, H. Li, B. Li, J. Su, X. Gao, Y.-F. Zhang, T. Xu, Z. Chu et al., "Errorradar: Benchmarking complex mathematical reasoning of multimodal large language models via error detection," arXiv preprint arXiv:2410.04509, 2024. +[339] Q. Jin, B. Dhingra, Z. Liu, W. W. Cohen, and X. Lu, "Pubmedqa: A dataset for biomedical research question answering," arXiv preprint arXiv:1909.06146, 2019. +[340] K. M. Hermann, T. Kocisky, E. Grefenstette, L. Espeholt, W. Kay, M. Suleyman, and P. Blunsom, "Teaching machines to read and comprehend," Advances in neural information processing systems, vol. 
28, 2015. +[341] S. Lin, J. Hilton, and O. Evans, "Truthfulqa: Measuring how models mimic human falsehoods," arXiv preprint arXiv:2109.07958, 2021. +[342] Y. Mou, S. Zhang, and W. Ye, "Sg-bench: Evaluating llm safety generalization across diverse tasks and + +prompt types," Advances in Neural Information Processing Systems, vol. 37, pp. 123032-123054, 2024. +[343] F. Jiang, Z. Xu, Y. Li, L. Niu, Z. Xiang, B. Li, B. Y. Lin, and R. Poovendran, "Safechain: Safety of language models with long chain-of-thought reasoning capabilities," arXiv preprint arXiv:2502.12025, 2025. +[344] T. Hartvigsen, S. Gabriel, H. Palangi, M. Sap, D. Ray, and E. Kamar, "Toxigen: A large-scale machine-generated dataset for adversarial and implicit hate speech detection," arXiv preprint arXiv:2203.09509, 2022. +[345] A. Souly, Q. Lu, D. Bowen, T. Trinh, E. Hsieh, S. Pandey, P. Abbeel, J. Svegliato, S. Emmons, O. Watkins et al., "A strongreject for empty jailbreaks," arXiv preprint arXiv:2402.10260, 2024. +[346] L. Jiang, K. Rao, S. Han, A. Ettinger, F. Brahman, S. Kumar, N. Mireshghallah, X. Lu, M. Sap, Y. Choi et al., "Wildteaming at scale: From in-the-wild jailbreaks to (adversarily) safer language models," Advances in Neural Information Processing Systems, vol. 37, pp. 47094-47165, 2024. +[347] D. Hendrycks, M. Mazeika, and T. Woodside, "An overview of catastrophic ai risks," arXiv preprint arXiv:2306.12001, 2023. +[348] B. Baker, J. Huizinga, L. Gao, Z. Dou, M. Y. Guan, A. Madry, W. Zaremba, J. Pachocki, and D. Farhi, "Monitoring reasoning models for misbehavior and the risks of promoting obfuscation," arXiv preprint arXiv:2503.11926, 2025. +[349] T. Hagendorff, "Deception abilities emerged in large language models," Proceedings of the National Academy of Sciences, vol. 121, no. 24, p. e2317967121, 2024. [Online]. Available: https://www.pnas.org/doi/abs/10.1073/pnas.2317967121 +[350] P. S. Park, S. Goldstein, A. O'Gara, M. Chen, and D. 
Hendrycks, "Ai deception: A survey of examples, risks, and potential solutions," Patterns, vol. 5, no. 5, 2024. +[351] OpenAI, "Gpt-4 technical report," ArXiv, vol. abs/2303.08774, 2023. +[352] F. Ward, F. Toni, F. Belardinelli, and T. Everitt, "Honesty is the best policy: defining and mitigating ai deception," Advances in neural information processing systems, vol. 36, pp. 2313-2341, 2023. +[353] J. Scheurer, M. Balesni, and M. Hobbahn, "Large language models can strategically deceive their users when put under pressure," arXiv preprint arXiv:2311.07590, 2023. +[354] S. Chern, Z. Hu, Y. Yang, E. Chern, Y. Guo, J. Jin, B. Wang, and P. Liu, "Behonest: Benchmarking honesty in large language models," arXiv preprint arXiv:2406.13261, 2024. +[355] A. O'Gara, "Hoodwinked: Deception and cooperation in a text-based game for language models," arXiv preprint arXiv:2308.01404, 2023. +[356] M. F. A. R. D. T. (FAIR)†, A. Bakhtin, N. Brown, E. Dinan, G. Farina, C. Flaherty, D. Fried, A. Goff, J. Gray, H. Hu et al., "Human-level play in the game of diplomacy by combining language models with strategic reasoning," Science, vol. 378, no. 6624, pp. 1067-1074, 2022. + +[357] L. Schulz, N. Alon, J. Rosenschein, and P. Dayan, "Emergent deception and skepticism via theory of mind," in First Workshop on Theory of Mind in Communicating Agents, 2023. +[358] A. Meinke, B. Schoen, J. Scheurer, M. Balesni, R. Shah, and M. Hobbahn, "Frontier models are capable of in-context scheming," arXiv preprint arXiv:2412.04984, 2024. +[359] R. Greenblatt, C. Denison, B. Wright, F. Roger, M. Mac-Diarmid, S. Marks, J. Treutlein, T. Belonax, J. Chen, D. Duvenaud et al., "Alignment faking in large language models," arXiv preprint arXiv:2412.14093, 2024. +[360] A. Pan, J. S. Chan, A. Zou, N. Li, S. Basart, T. Woodside, H. Zhang, S. Emmons, and D. Hendrycks, "Do the rewards justify the means? 
measuring trade-offs between rewards and ethical behavior in the machiavelli benchmark," in International conference on machine learning. PMLR, 2023, pp. 26837-26867. +[361] L. Vaugrante, F. Carlon, M. Menke, and T. Hagen-dorff, "Compromising honesty and harmlessness in language models via deception attacks," arXiv preprint arXiv:2502.08301, 2025. +[362] J. Ji, K. Wang, T. Qiu, B. Chen, J. Zhou, C. Li, H. Lou, and Y. Yang, "Language models resist alignment," arXiv preprint arXiv:2406.06144, 2024. +[363] L. Bürger, F. A. Hamprecht, and B. Nadler, "Truth is universal: Robust detection of lies in llms," Advances in Neural Information Processing Systems, vol. 37, pp. 138-393-138-431, 2024. +[364] OpenAI, "Detecting misbehavior in frontier reasoning models," https://openai.com/index/chain-of-thought-monitoring/, Mar. 2025, accessed: 2025-05-14. +[365] T. Everitt, V. Krakovna, L. Orseau, M. Hutter, and S. Legg, "Reinforcement learning with a corrupted reward channel," arXiv preprint arXiv:1705.08417, 2017. +[366] S. Zhuang and D. Hadfield-Menell, "Consequences of misaligned ai," Advances in Neural Information Processing Systems, vol. 33, pp. 15763-15773, 2020. +[367] V. Krakovna, J. Uesato, V. Mikulik, M. Rahtz, T. Everitt, R. Kumar, Z. Kenton, J. Leike, and S. Legg, "Specification gaming: the flip side of ai ingenuity," 2020, accessed: 2025-03-30. [Online]. Available: https://deepmind.google/discover/blog/ specification-gaming-the-flip-side-of-ai-ingenuity/ +[368] D. Amodei, C. Olah, J. Steinhardt, P. Christiano, J. Schulman, and D. Mané, "Concrete problems in air safety," arXiv preprint arXiv:1606.06565, 2016. +[369] L. Weng, "Reward hacking in reinforcement learning," 2024, accessed: 2025-03-30. [Online]. Available: https://lilianweng.github.io/posts/2024-11-28-reward-hacking +[370] T. Everitt, M. Hutter, R. Kumar, and V. Krakovna, "Reward tampering problems and solutions in reinforcement learning: A causal influence diagram perspective," Synthese, vol. 198, no. 
Suppl 27, pp. 6435-6467, 2021. +[371] J. Skalse, N. Howe, D. Krasheninnikov, and D. Krueger, "Defining and characterizing reward gaming," Advances in Neural Information Processing Systems, vol. 35, pp. 9460-9471, 2022. + +[372] S. Casper, X. Davies, C. Shi, T. K. Gilbert, J. Scheurer, J. Rando, R. Freedman, T. Korbak, D. Lindner, P. Freire et al., "Open problems and fundamental limitations of reinforcement learning from human feedback," arXiv preprint arXiv:2307.15217, 2023. +[373] L. Gao, J. Schulman, and J. Hilton, "Scaling laws for reward model overoptimization," in International Conference on Machine Learning. PMLR, 2023, pp. 10835-10866. +[374] E. Perez, S. Ringer, K. Lukosiute, K. Nguyen, E. Chen, S. Heiner, C. Pettit, C. Olsson, S. Kundu, S. Kadavath et al., "Discovering language model behaviors with model-written evaluations," in Findings of the Association for Computational Linguistics: ACL 2023, 2023, pp. 13387-13434. +[375] C. Denison, M. MacDiarmid, F. Berez, D. Duvenaud, S. Kravec, S. Marks, N. Schiefer, R. Soklaski, A. Tamkin, J. Kaplan et al., "Sycophancy to subterfuge: Investigating reward-tampering in large language models," arXiv preprint arXiv:2406.10162, 2024. +[376] P. Singhal, T. Goyal, J. Xu, and G. Durrett, "A long way to go: Investigating length correlations in rlhf," arXiv preprint arXiv:2310.03716, 2023. +[377] F. Bianchi, M. Suzgun, G. Attanasio, P. Röttger, D. Jurafsky, T. Hashimoto, and J. Zou, "Safety-tuned llamas: Lessons from improving the safety of large language models that follow instructions," arXiv preprint arXiv:2309.07875, 2023. +[378] M. Tegmark and S. Omohundro, "Provably safe systems: the only path to controllable agi," arXiv preprint arXiv:2309.01933, 2023. +[379] D. Dalrymple, J. Skalse, Y. Bengio, S. Russell, M. Tegmark, S. Seshia, S. Omohundro, C. Szegedy, B. Goldhaber, N. Ammann et al., "Towards guaranteed safe ai: A framework for ensuring robust and reliable ai systems," arXiv preprint arXiv:2405.06624, 2024. 
+[380] A. Caliskan, J. J. Bryson, and A. Narayanan, "Semantics derived automatically from language corpora contain human-like biases," Science, vol. 356, no. 6334, pp. 183-186, 2017. +[381] R. Xu, Z. Zhou, T. Zhang, Z. Qi, S. Yao, K. Xu, W. Xu, and H. Qiu, "Walking in others' shoes: How perspective-taking guides large language models in reducing toxicity and bias," arXiv preprint arXiv:2407.15366, 2024. +[382] D. Acemoglu and P. Restrepo, "Artificial intelligence, automation, and work," in The economics of artificial intelligence: An agenda. University of Chicago Press, 2018, pp. 197-236. +[383] J. Mokander, J. Schuett, H. R. Kirk, and L. Floridi, "Auditing large language models: a three-layered approach," AI and Ethics, vol. 4, no. 4, pp. 1085-1115, 2024. +[384] M. Anderljung, J. Barnhart, A. Korinek, J. Leung, C. O'Keefe, J. Whittlestone, S. Avin, M. Brundage, J. Bullock, D. Cass-Beggs et al., "Frontier ai regulation: Managing emerging risks to public safety," arXiv preprint arXiv:2307.03718, 2023. +[385] A. Mannes, "Governance, risk, and artificial intelligence," *Ai Magazine*, vol. 41, no. 1, pp. 61-69, 2020. +[386] L. Koessler and J. Schuett, "Risk assessment at agi + +companies: A review of popular risk assessment techniques from other safety-critical industries," arXiv preprint arXiv:2307.08823, 2023. +[387] J. Schuett, N. Dreksler, M. Anderljung, D. McCaffary, L. Heim, E. Bluemke, and B. Garfinkel, "Towards best practices in agi safety and governance: A survey of expert opinion," arXiv preprint arXiv:2305.07153, 2023. +[388] L. Ho, J. Barnhart, R. Trager, Y. Bengio, M. Brundage, A. Carnegie, R. Chowdhury, A. Dafoe, G. Hadfield, M. Levi et al., "International institutions for advanced ai," arXiv preprint arXiv:2307.04699, 2023. +[389] M. M. Maas, "Aligning ai regulation to sociotechnical change," in The Oxford Handbook of AI Governance, 2022. +[390] M. Kinniment, L. J. K. Sato, H. Du, B. Goodrich, M. Hasin, L. Chan, L. H. Miles, T. R. Lin, H. Wijk, J. 
Burget et al., "Evaluating language-model agents on realistic autonomous tasks," arXiv preprint arXiv:2312.11671, 2023. +[391] J. Tallberg, E. Erman, M. Furendal, J. Geith, M. Klamberg, and M. Lundgren, "The global governance of artificial intelligence: Next steps for empirical and normative research," International Studies Review, vol. 25, no. 3, p. viad040, 2023. +[392] OECD, "OECD Principles on Artificial Intelligence," https://oecd.ai/en/ai-principles, 2019. +[393] UNESCO, "Recommendation on the Ethics of Artificial Intelligence," https://unesdoc.unesco.org/ark:/48223/pf0000381137, 2021. +[394] E. Seger, N. Dreksler, R. Moulange, E. Dardaman, J. Schuett, K. Wei, C. Winter, M. Arnold, S. O. hEigeartaigh, A. Korinek et al., "Open-sourcing highly capable foundation models: An evaluation of risks, benefits, and alternative methods for pursuing open-source objectives," arXiv preprint arXiv:2311.09227, 2023. +[395] F. Urbina, F. Lentzos, C. Invernizzi, and S. Ekins, "Dual use of artificial-intelligence-powered drug discovery," Nature machine intelligence, vol. 4, no. 3, pp. 189-191, 2022. +[396] Meta, "Meta and Microsoft introduce the next generation of Llama," https://ai.meta.com/blog/llama-2, 2023. +[397] E. Mostaque, "Democratizing ai, stable diffusion & generative models," https://exchange scale.com/public/videos/emad-mostaque-stability-ai-stable-diffusion-open-sou2022. +[398] J. A. Goldstein, G. Sastry, M. Musser, R. DiResta, M. Gentzel, and K. Sedova, "Generative language models and automated influence operations: Emerging threats and potential mitigations," arXiv preprint arXiv:2301.04246, 2023. +[399] I. Solaiman, M. Brundage, J. Clark, A. Askell, A. Herbert-Voss, J. Wu, A. Radford, G. Krueger, J. W. Kim, S. Kreps et al., "Release strategies and the social impacts of language models," arXiv preprint arXiv:1908.09203, 2019. +[400] P. 
Chavez, "An ai challenge: Balancing open and closed systems," https://cepa.org/article/an-ai-challenge-balancing-open-and-closed-systems, + +2023. +[401] N. Zhang, Y. Yao, B. Tian, P. Wang, S. Deng, M. Wang, Z. Xi, S. Mao, J. Zhang, Y. Ni et al., "A comprehensive study of knowledge editing for large language models," arXiv preprint arXiv:2401.01286, 2024. +[402] J. Fang, H. Jiang, K. Wang, Y. Ma, X. Wang, X. He, and T.-s. Chua, "Alphaedit: Null-space constrained knowledge editing for language models," arXiv preprint arXiv:2410.02355, 2024. +[403] Z. Zhang, Y. Zhou, X. Zhao, T. Che, and L. Lyu, "Prompt certified machine unlearning with randomized gradient smoothing and quantization," Advances in Neural Information Processing Systems, vol. 35, pp. 13433-13455, 2022. +[404] T. Che, Y. Zhou, Z. Zhang, L. Lyu, J. Liu, D. Yan, D. Dou, and J. Huan, "Fast federated machine unlearning with nonlinear functional theory," in International conference on machine learning. PMLR, 2023, pp. 4241-4268. +[405] W. Wang, Z. Tian, C. Zhang, and S. Yu, "Machine unlearning: A comprehensive survey," arXiv preprint arXiv:2405.07406, 2024. +[406] S. Liu, Y. Yao, J. Jia, S. Casper, N. Baracaldo, P. Hase, Y. Yao, C. Y. Liu, X. Xu, H. Li et al., "Rethinking machine unlearning for large language models," Nature Machine Intelligence, pp. 1-14, 2025. +[407] Y. Yao, X. Xu, and Y. Liu, "Large language model unlearning," Advances in Neural Information Processing Systems, vol. 37, pp. 105-425-105-475, 2025. +[408] C. Ding, J. Wu, Y. Yuan, J. Lu, K. Zhang, A. Su, X. Wang, and X. He, "Unified parameter-efficient unlearning for llms," arXiv preprint arXiv:2412.00383, 2024. +[409] Z. Li, H. Jiang, H. Chen, B. Bi, Z. Zhou, F. Sun, J. Fang, and X. Wang, "Reinforced lifelong editing for language models," arXiv preprint arXiv:2502.05759, 2025. +[410] E. Mitchell, C. Lin, A. Bosselut, C. Finn, and C. D. Manning, "Fast model editing at scale," arXiv preprint arXiv:2110.11309, 2021. +[411] N. De Cao, W. 
Aziz, and I. Titov, "Editing factual knowledge in language models," arXiv preprint arXiv:2104.08164, 2021. +[412] P. Wang, Z. Li, N. Zhang, Z. Xu, Y. Yao, Y. Jiang, P. Xie, F. Huang, and H. Chen, "Wise: Rethinking the knowledge memory for lifelong model editing of large language models," arXiv preprint arXiv:2405.14768, 2024. +[413] T. Hartvigsen, S. Sankaranarayanan, H. Palangi, Y. Kim, and M. Ghassemi, "Aging with grace: Lifelong model editing with discrete key-value adaptors," Advances in Neural Information Processing Systems, vol. 36, 2024. +[414] H. Jiang, J. Fang, N. Zhang, G. Ma, M. Wan, X. Wang, X. He, and T.-s. Chua, "Anyedit: Edit any knowledge encoded in language models," arXiv preprint arXiv:2502.05628, 2025. +[415] H. Jiang, J. Fang, T. Zhang, A. Zhang, R. Wang, T. Liang, and X. Wang, "Neuron-level sequential editing for large language models," arXiv preprint arXiv:2410.04045, 2024. +[416] K. Meng, D. Bau, A. Andonian, and Y. Belinkov, + +"Locating and editing factual associations in gpt," Advances in Neural Information Processing Systems, vol. 35, pp. 17359-17372, 2022. +[417] A. Prasad, P. Hase, X. Zhou, and M. Bansal, "Grips: Gradient-free, edit-based instruction search for prompting large language models," arXiv preprint arXiv:2203.07281, 2022. +[418] G. Gangadhar and K. Stratos, "Model editing by standard fine-tuning," arXiv preprint arXiv:2402.11078, 2024. +[419] E. Mitchell, C. Lin, A. Bosselut, C. D. Manning, and C. Finn, "Memory-based model editing at scale," in International Conference on Machine Learning. PMLR, 2022, pp. 15817-15831. +[420] Y. Yao, P. Wang, B. Tian, S. Cheng, Z. Li, S. Deng, H. Chen, and N. Zhang, "Editing large language models: Problems, methods, and opportunities," arXiv preprint arXiv:2305.13172, 2023. +[421] K. Meng, A. S. Sharma, A. Andonian, Y. Belinkov, and D. Bau, "Mass-editing memory in a transformer," arXiv preprint arXiv:2210.07229, 2022. +[422] J.-C. Gu, H.-X. Xu, J.-Y. Ma, P. Lu, Z.-H. Ling, K.-W. Chang, and N. 
Peng, "Model editing can hurt general abilities of large language models," arXiv e-prints, pp. arXiv-2401, 2024. +[423] X. Li, S. Li, S. Song, J. Yang, J. Ma, and J. Yu, "Pmet: Precise model editing in a transformer," in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 17, 2024, pp. 18564-18572. +[424] M. Zhang, X. Ye, Q. Liu, P. Ren, S. Wu, and Z. Chen, "Knowledge graph enhanced large language model editing," arXiv preprint arXiv:2402.13593, 2024. +[425] C. Chen, B. Huang, Z. Li, Z. Chen, S. Lai, X. Xu, J.-C. Gu, J. Gu, H. Yao, C. Xiao et al., "Can editing llms inject harm?" arXiv preprint arXiv:2407.20224, 2024. +[426] M. Wang, N. Zhang, Z. Xu, Z. Xi, S. Deng, Y. Yao, Q. Zhang, L. Yang, J. Wang, and H. Chen, "Detoxifying large language models via knowledge editing," arXiv preprint arXiv:2403.14472, 2024. +[427] C. Zheng, L. Li, Q. Dong, Y. Fan, Z. Wu, J. Xu, and B. Chang, "Can we edit factual knowledge by in-context learning?" arXiv preprint arXiv:2305.12740, 2023. +[428] Y. Li, T. Li, K. Chen, J. Zhang, S. Liu, W. Wang, T. Zhang, and Y. Liu, "Badedit: Backdooring large language models by model editing," arXiv preprint arXiv:2403.13355, 2024. +[429] K. Grimes, M. Christiani, D. Shriver, and M. Connor, "Concept-rot: Poisoning concepts in large language models with model editing," arXiv preprint arXiv:2412.13341, 2024. +[430] X. Wu, J. Li, M. Xu, W. Dong, S. Wu, C. Bian, and D. Xiong, "Depn: Detecting and editing privacy neurons in pretrained language models," arXiv preprint arXiv:2310.20138, 2023. +[431] X. Li, Z. Li, Y. Kosuga, Y. Yoshida, and V. Bian, "Precision knowledge editing: Enhancing safety in large language models," arXiv preprint arXiv:2410.03772, 2024. +[432] X. Hu, D. Li, B. Hu, Z. Zheng, Z. Liu, and M. Zhang, "Separate the wheat from the chaff: Model deficiency unlearning via parameter-efficient module op + +eration," in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 16, 2024, pp. 18252-18260. 
+[433] T. Yang, L. Dai, Z. Liu, X. Wang, M. Jiang, Y. Tian, and X. Zhang, "Cliperase: Efficient unlearning of visual-textual associations in clip," arXiv preprint arXiv:2410.23330, 2024. +[434] R. Gandikota, J. Materzynska, J. Fiotto-Kaufman, and D. Bau, "Erasing concepts from diffusion models," 2023 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 2426-2436, 2023. +[435] E. Zhang, K. Wang, X. Xu, Z. Wang, and H. Shi, "Forget-me-not: Learning to forget in text-to-image diffusion models," 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pp. 1755-1764, 2023. +[436] C. Fan, J. Liu, Y. Zhang, D. Wei, E. Wong, and S. Liu, "Salun: Empowering machine unlearning via gradient-based weight saliency in both image classification and generation," ArXiv, vol. abs/2310.12508, 2023. +[437] Z. Huang, X. Cheng, J. Zheng, H. Wang, Z. He, T. Li, and X. Huang, "Unified gradient-based machine unlearning with remain geometry enhancement," ArXiv, vol. abs/2409.19732, 2024. +[438] A. Blanco-Justicia, J. Domingo-Ferrer, N. M. Jebreel, B. Manzanares-Salor, and D. Sánchez, "Unlearning in large language models: We are not there yet," Computer, vol. 58, no. 1, pp. 97-100, 2025. +[439] S. Dai, C. Xu, S. Xu, L. Pang, Z. Dong, and J. Xu, "Bias and unfairness in information retrieval systems: New challenges in the llm era," in Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, 2024, pp. 6437-6447. +[440] G. Nicolas and A. Caliskan, "A taxonomy of stereotype content in large language models," arXiv preprint arXiv:2408.00162, 2024. +[441] S. Wang, R. Li, X. Chen, Y. Yuan, D. F. Wong, and M. Yang, "Exploring the impact of personality traits on llm bias and toxicity," arXiv preprint arXiv:2502.12566, 2025. +[442] A. Liu, Q. Sheng, and X. 
Hu, "Preventing and detecting misinformation generated by large language models," in Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval, 2024, pp. 3001-3004. +[443] Q. Zhang, H. Qiu, D. Wang, H. Qian, Y. Li, T. Zhang, and M. Huang, "Understanding the dark side of lms' intrinsic self-correction," arXiv preprint arXiv:2412.14959, 2024. +[444] R. Xu, B. Lin, S. Yang, T. Zhang, W. Shi, T. Zhang, Z. Fang, W. Xu, and H. Qiu, "The earth is flat because...: Investigating llms' belief towards misinformation via persuasive conversation," in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2024, pp. 16259-16303. +[445] Z. Liu, G. Dou, Z. Tan, Y. Tian, and M. Jiang, "Machine unlearning in generative ai: A survey," arXiv preprint arXiv:2407.20516, 2024. +[446] Y. Qu, M. Ding, N. Sun, K. Thilakarathna, T. Zhu, and D. Niyato, "The frontier of data erasure: Machine + +unlearning for large language models," arXiv preprint arXiv:2403.15779, 2024. +[447] A. Blanco-Justicia, N. Jebreel, B. Manzanares-Salor, D. Sánchez, J. Domingo-Ferrer, G. Collell, and K. Eeik Tan, "Digital forgetting in large language models: A survey of unlearning methods," Artificial Intelligence Review, vol. 58, no. 3, p. 90, 2025. +[448] N. Li, C. Zhou, Y. Gao, H. Chen, Z. Zhang, B. Kuang, and A. Fu, "Machine unlearning: Taxonomy, metrics, applications, challenges, and prospects," IEEE Transactions on Neural Networks and Learning Systems, 2025. +[449] C. Gao, L. Wang, C. Weng, X. Wang, and Q. Zhu, "Practical unlearning for large language models," arXiv preprint arXiv:2407.10223, 2024. +[450] P. Thaker, S. Hu, N. Kale, Y. Maurya, Z. S. Wu, and V. Smith, "Position: Llm unlearning benchmarks are weak measures of progress," arXiv preprint arXiv:2410.02879, 2024. +[451] K. Zhao, M. Kurmanji, G.-O. Bärbulescu, E. Triantafillou, and P. 
Triantafillou, "What makes unlearning hard and what to do about it," Advances in Neural Information Processing Systems, vol. 37, pp. 12293-12333, 2025. +[452] W. Wang, M. Zhang, X. Ye, Z. Ren, Z. Chen, and P. Ren, "Uipe: Enhancing llm unlearning by removing knowledge related to forgetting targets," arXiv preprint arXiv:2503.04693, 2025. +[453] H. Wang, Y. Jing, H. Sun, Y. Wang, J. Wang, J. Liao, and D. Tao, "Erasing without remembering: Safeguarding knowledge forgetting in large language models," arXiv preprint arXiv:2502.19982, 2025. +[454] T. Tran, R. Liu, and L. Xiong, "Tokens for learning, tokens for unlearning: Mitigating membership inference attacks in large language models via dual-purpose training," arXiv preprint arXiv:2502.19726, 2025. +[455] H. Xu, N. Zhao, L. Yang, S. Zhao, S. Deng, M. Wang, B. Hooi, N. Oo, H. Chen, and N. Zhang, "Relearn: Unlearning via learning for large language models," arXiv preprint arXiv:2502.11190, 2025. +[456] Q. Zhang, H. Qiu, D. Wang, Y. Li, T. Zhang, W. Zhu, H. Weng, L. Yan, and C. Zhang, "Large scale knowledge washing," in The Thirteenth International Conference on Learning Representations, 2025. +[457] A. Thudi, H. Jia, I. Shumailov, and N. Papernot, "On the necessity of auditable algorithmic definitions for machine unlearning," in 31st USENIX security symposium (USENIX Security 22), 2022, pp. 4007-4022. +[458] S. Goel, A. Prabhu, P. Torr, P. Kumaraguru, and A. Sanyal, "Corrective machine unlearning," Transactions on Machine Learning Research. +[459] A. Thudi, G. Deza, V. Chandrasekaran, and N. Papernot, "Unrolling sgd: Understanding factors influencing machine unlearning," in 2022 IEEE 7th European Symposium on Security and Privacy (EuroS&P). IEEE, 2022, pp. 303-319. +[460] B. Liu, Q. Liu, and P. Stone, "Continual learning and private unlearning," in Conference on Lifelong Learning Agents. PMLR, 2022, pp. 243-254. +[461] Q. P. Nguyen, B. K. H. Low, and P. 
Jaillet, "Variational bayesian unlearning," Advances in Neural Information Processing Systems, vol. 33, pp. 16025-16036, 2020. + +[462] L. Wang, T. Chen, W. Yuan, X. Zeng, K.-F. Wong, and H. Yin, "Kga: A general machine unlearning framework based on knowledge gap alignment," arXiv preprint arXiv:2305.06535, 2023. +[463] Y. Liu, Y. Zhang, T. Jaakkola, and S. Chang, "Revisiting who's harry potter: Towards targeted unlearning from a causal intervention perspective," arXiv preprint arXiv:2407.16997, 2024. +[464] P. Maini, Z. Feng, A. Schwarzschild, Z. C. Lipton, and J. Z. Kolter, "Tofu: A task of fictitious unlearning for llms," arXiv preprint arXiv:2401.06121, 2024. +[465] R. Zhang, L. Lin, Y. Bai, and S. Mei, "Negative preference optimization: From catastrophic collapse to effective unlearning," arXiv preprint arXiv:2404.05868, 2024. +[466] R. Rafailov, A. Sharma, E. Mitchell, C. D. Manning, S. Ermon, and C. Finn, "Direct preference optimization: Your language model is secretly a reward model," Advances in Neural Information Processing Systems, vol. 36, 2024. +[467] J. Huo, Y. Yan, X. Zheng, Y. Lyu, X. Zou, Z. Wei, and X. Hu, "Mmunlearner: Reformulating multimodal machine unlearning in the era of multimodal large language models," arXiv preprint arXiv:2502.11051, 2025. +[468] J. Li, Q. Wei, C. Zhang, G. Qi, M. Du, Y. Chen, and S. Bi, "Single image unlearning: Efficient machine unlearning in multimodal large language models," arXiv preprint arXiv:2405.12523, 2024. +[469] S. Xing, F. Zhao, Z. Wu, T. An, W. Chen, C. Li, J. Zhang, and X. Dai, "Efuf: Efficient fine-grained unlearning framework for mitigating hallucinations in multimodal large language models," ArXiv, vol. abs/2402.09801, 2024. +[470] T. Chakraborty, E. Shayegani, Z. Cai, N. B. Abu-Ghazaleh, M. S. Asif, Y. Dong, A. K. Roy-Chowdhury, and C. Song, "Cross-modal safety alignment: Is textual unlearning all you need?" ArXiv, vol. abs/2406.02575, 2024. +[471] J. Chen, Z. Deng, K. Zheng, Y. Yan, S. Liu, P. Wu, P. 
Jiang, J. Liu, and X. Hu, "Safeeraser: Enhancing safety in multimodal large language models through multimodal machine unlearning," arXiv preprint arXiv:2502.12520, 2025. +[472] G. Ilharco, M. T. Ribeiro, M. Wortsman, S. Gururangan, L. Schmidt, H. Hajishirzi, and A. Farhadi, "Editing models with task arithmetic," arXiv preprint arXiv:2212.04089, 2022. +[473] D. Jung, J. Seo, J. Lee, C. Park, and H. Lim, "Come: An unlearning-based approach to conflict-free model editing," arXiv preprint arXiv:2502.15826, 2025. +[474] B. Zhang, Z. Chen, Z. Zheng, J. Li, and H. Chen, "Resolving editing-unlearning conflicts: A knowledge codebook framework for large language model updating," arXiv preprint arXiv:2502.00158, 2025. +[475] R. Eldan and M. Russinovich, "Who's harry potter? approximate unlearning in llms," arXiv preprint arXiv:2310.02238, 2023. +[476] N. Li, A. Pan, A. Gopal, S. Yue, D. Berrios, A. Gatti, J. D. Li, A.-K. Dombrowski, S. Goel, L. Phan et al., "The wmdp benchmark: Measuring and re + +ducing malicious use with unlearning," arXiv preprint arXiv:2403.03218, 2024. +[477] M. Pawelczyk, S. Neel, and H. Lakkaraju, "In-context unlearning: Language models as few shot unlearners," arXiv preprint arXiv:2310.07579, 2023. +[478] P. Thaker, Y. Maurya, S. Hu, Z. S. Wu, and V. Smith, "Guardrail baselines for unlearning in llms," arXiv preprint arXiv:2403.03329, 2024. +[479] J. Ren, Z. Dai, X. Tang, H. Liu, J. Zeng, Z. Li, R. Goutam, S. Wang, Y. Xing, and Q. He, "A general framework to enhance fine-tuning-based llm unlearning," arXiv preprint arXiv:2502.17823, 2025. +[480] X. Zhao, W. Cai, T. Shi, D. Huang, L. Lin, S. Mei, and D. Song, "Improving llm safety alignment with dual-objective optimization," arXiv preprint arXiv:2503.03710, 2025. +[481] S. Takashiro, T. Kojima, A. Gambardella, Q. Cao, Y. Iwasawa, and Y. Matsuo, "Answer when needed, forget when not: Language models pretend to forget via in-context knowledge unlearning," arXiv preprint arXiv:2410.00382, 2024. +[482] A. 
Muresanu, A. Thudi, M. R. Zhang, and N. Papernot, "Unlearnable algorithms for in-context learning," arXiv preprint arXiv:2402.00751, 2024. +[483] Y. Zhou, X. Li, Q. Wang, and J. Shen, "Visual in-context learning for large vision-language models," arXiv preprint arXiv:2402.11574, 2024. +[484] Z. Liu, G. Dou, X. Yuan, C. Zhang, Z. Tan, and M. Jiang, "Modality-aware neuron pruning for unlearning in multimodal large language models," arXiv preprint arXiv:2502.15910, 2025. +[485] N. Yang, M. Kim, S. Yoon, J. Shin, and K. Jung, "Faithun: Toward faithful forgetting in language models by investigating the interconnectedness of knowledge," arXiv preprint arXiv:2502.19207, 2025. +[486] A. Ramakrishna, Y. Wan, X. Jin, K.-W. Chang, Z. Bu, B. Vinzamuri, V. Cevher, M. Hong, and R. Gupta, "Lume: Llm unlearning with multitask evaluations," arXiv preprint arXiv:2502.15097, 2025. +[487] Y. Lang, K. Guo, Y. Huang, Y. Zhou, H. Zhuang, T. Yang, Y. Su, and X. Zhang, "Beyond single-value metrics: Evaluating and enhancing llm unlearning with cognitive diagnosis," arXiv preprint arXiv:2502.13996, 2025. +[488] Q. Wang, J. P. Zhou, Z. Zhou, S. Shin, B. Han, and K. Q. Weinberger, "Rethinking llm unlearning objectives: A gradient perspective and go beyond," arXiv preprint arXiv:2502.19301, 2025. +[489] M. Khoriaty, A. Shportko, G. Mercier, and Z. Wood-Doughty, "Don't forget it! conditional sparse autoencoder clamping works for unlearning," arXiv preprint arXiv:2503.11127, 2025. +[490] J. Cheng and H. Amiri, "Mu-bench: A multitask multimodal benchmark for machine unlearning," arXiv preprint arXiv:2406.14796, 2024. +[491] V. Patil, Y.-L. Sung, P. Hase, J. Peng, T. Chen, and M. Bansal, "Unlearning sensitive information in multimodal llms: Benchmark and attack-defense evaluation," Transactions on Machine Learning Research. +[492] Y. Ma, J. Wang, F. Wang, S. Ma, J. Li, X. Li, F. Huang, L. Sun, B. Li, Y. 
Choi et al., "Benchmarking vision lan + +guage model unlearning via fictitious facial identity dataset," arXiv preprint arXiv:2411.03554, 2024. +[493] S. Moon, M. Lee, S. Park, and D. Kim, “Holistic unlearning benchmark: A multi-faceted evaluation for text-to-image diffusion model unlearning,” arXiv preprint arXiv:2410.05664, 2024. +[494] D. Sanyal and M. Mandal, "Alu: Agentic llm unlearning," arXiv preprint arXiv:2502.00406, 2025. +[495] J. Cheng and H. Amiri, "Tool unlearning for tool-augmented llms," arXiv preprint arXiv:2502.01083, 2025. +[496] H. Liu, P. Xiong, T. Zhu, and S. Y. Philip, "A survey on machine unlearning: Techniques and new emerged privacy risks," Journal of Information Security and Applications, vol. 90, p. 104010, 2025. +[497] S. Qureshi, T. Shaik, X. Tao, H. Xie, L. Li, J. Yong, and X. Jia, "Exploring incremental unlearning: Techniques, challenges, and future directions," arXiv preprint arXiv:2502.16708, 2025. +[498] J. Geng, Q. Li, H. Woisetschlaeger, Z. Chen, Y. Wang, P. Nakov, H.-A. Jacobsen, and F. Karray, "A comprehensive survey of machine unlearning techniques for large language models," arXiv preprint arXiv:2503.01854, 2025. +[499] X. He, C. Chen, L. Lyu, and Q. Xu, "Extracted bert model leaks more information than you think!" in Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, EMNLP 2022. Association for Computational Linguistics, 2022, pp. 1530-1537. +[500] X. He, Q. Xu, L. Lyu, F. Wu, and C. Wang, "Protecting intellectual property of language generation apis with lexical watermark," in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 36, no. 10, 2022, pp. 10758-10766. +[501] X. He, Q. Xu, Y. Zeng, L. Lyu, F. Wu, J. Li, and R. Jia, "Cater: Intellectual property protection on text generation apis via conditional watermarks," Advances in Neural Information Processing Systems, vol. 35, pp. 5431-5445, 2022. +[502] W. Peng, J. Yi, F. Wu, S. Wu, B. B. Zhu, L. Lyu, B. Jiao, T. Xu, G. 
Sun, and X. Xie, "Are you copying my model? protecting the copyright of large language models for eaas via backdoor watermark," in Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2023, pp. 7653-7668. +[503] N. Carlini, D. Paleka, K. D. Dvijotham, T. Steinke, J. Hayase, A. F. Cooper, K. Lee, M. Jagielski, M. Nasr, A. Conmy et al., "Stealing part of a production language model," arXiv preprint arXiv:2403.06634, 2024. +[504] M. Finlayson, X. Ren, and S. Swayamdipta, "Logits of api-protected llms leak proprietary information," arXiv preprint arXiv:2403.09539, 2024. +[505] S. Zanella-Beguelin, S. Tople, A. Paverd, and B. Köpf, "Grey-box extraction of natural language models," in International Conference on Machine Learning. PMLR, 2021, pp. 12278-12286. +[506] E. Horwitz, J. Kahana, and Y. Hoshen, "Recovering the pre-fine-tuning weights of generative models," arXiv preprint arXiv:2402.10208, 2024. + +[507] Z. Li, C. Wang, P. Ma, C. Liu, S. Wang, D. Wu, C. Gao, and Y. Liu, "On extracting specialized code abilities from large language models: A feasibility study," in Proceedings of the IEEE/ACM 46th International Conference on Software Engineering, 2024, pp. 1-13. +[508] A. Liu and A. Moitra, "Model stealing for any low-rank language model," arXiv preprint arXiv:2411.07536, 2024. +[509] W. Shi, A. Ajith, M. Xia, Y. Huang, D. Liu, T. Blevins, D. Chen, and L. Zettlemoyer, "Detecting pretraining data from large language models," arXiv preprint arXiv:2310.16789, 2023. +[510] J. Zhang, J. Sun, E. Yeats, Y. Ouyang, M. Kuo, J. Zhang, H. F. Yang, and H. Li, "Min- $k\%$ ++: Improved baseline for detecting pre-training data from large language models," arXiv preprint arXiv:2404.02936, 2024. +[511] D. Das, J. Zhang, and F. Tramér, "Blind baselines beat membership inference attacks for foundation models," arXiv preprint arXiv:2406.16201, 2024. +[512] P. Maini, H. Jia, N. Papernot, and A. 
Dziedzic, "Llm dataset inference: Did you train on my dataset?" Advances in Neural Information Processing Systems, vol. 37, pp. 124069-124092, 2024. +[513] A. V. Duarte, X. Zhao, A. L. Oliveira, and L. Li, "De-cop: Detecting copyrighted content in language models training data," arXiv preprint arXiv:2402.09910, 2024. +[514] R. Xie, J. Wang, R. Huang, M. Zhang, R. Ge, J. Pei, N. Z. Gong, and B. Dhingra, "Recall: Membership inference via relative conditional log-likelihoods," arXiv preprint arXiv:2406.15968, 2024. +[515] F. Galli, L. Melis, and T. Cucinotta, "Noisy neighbors: Efficient membership inference attacks against llms," arXiv preprint arXiv:2406.16565, 2024. +[516] H. Mozaffari and V. J. Marathe, "Semantic membership inference attack against large language models," arXiv preprint arXiv:2406.10218, 2024. +[517] M. Meeus, S. Jain, M. Rei, and Y.-A. de Montjoye, "Did the neurons read your book? document-level membership inference for large language models," in 33rd USENIX Security Symposium (USENIX Security 24), 2024, pp. 2369-2385. +[518] M. Meeus, I. Shilov, M. Faysse, and Y.-A. De Montjoye, "Copyright traps for large language models," arXiv preprint arXiv:2402.09363, 2024. +[519] H. Puerto, M. Gubri, S. Yun, and S. J. Oh, "Scaling up membership inference: When and how attacks succeed on large language models," arXiv preprint arXiv:2411.00154, 2024. +[520] M. Anderson, G. Amit, and A. Goldsteen, “Is my data in your retrieval database? membership inference attacks against retrieval augmented generation,” arXiv preprint arXiv:2405.20446, 2024. +[521] Y. Li, G. Liu, C. Wang, and Y. Yang, "Generating is believing: Membership inference attacks against retrieval-augmented generation," arXiv preprint arXiv:2406.19234, 2024. +[522] R. Wen, Z. Li, M. Backes, and Y. Zhang, "Membership inference attacks against in-context learning," in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 3481- + +3495. +[523] H. Duan, A. 
Dziedzic, M. Yaghini, N. Papernot, and F. Boenisch, "On the privacy risk of in-context learning," arXiv preprint arXiv:2411.10512, 2024. +[524] Y. Wen, L. Marchyok, S. Hong, J. Geiping, T. Goldstein, and N. Carlini, "Privacy backdoors: Enhancing membership inference through poisoning pre-trained models," arXiv preprint arXiv:2404.01231, 2024. +[525] R. Wen, T. Wang, M. Backes, Y. Zhang, and A. Salem, "Last one standing: A comparative analysis of security and privacy of soft prompt tuning, lora, and in-context learning," arXiv preprint arXiv:2310.11397, 2023. +[526] S. Balloccu, P. Schmidtová, M. Lango, and O. Dusek, "Leak, cheat, repeat: Data contamination and evaluation malpractices in closed-source llms," arXiv preprint arXiv:2402.03927, 2024. +[527] W. Fu, H. Wang, C. Gao, G. Liu, Y. Li, and T. Jiang, "Membership inference attacks against fine-tuned large language models via self-prompt calibration," in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. +[528] H. Li, G. Deng, Y. Liu, K. Wang, Y. Li, T. Zhang, Y. Liu, G. Xu, G. Xu, and H. Wang, "Digger: Detecting copyright content mis-usage in large language model training," arXiv preprint arXiv:2401.00676, 2024. +[529] A. Naseh and N. Mireshghallah, "Synthetic data can mislead evaluations: Membership inference as machine text detection," arXiv preprint arXiv:2501.11786, 2025. +[530] Z. Liao and H. Sun, "Amplegcg: Learning a universal and transferable generative model of adversarial suffixes for jailbreaking both open and closed llms," arXiv preprint arXiv:2404.07921, 2024. +[531] X. Jia, T. Pang, C. Du, Y. Huang, J. Gu, Y. Liu, X. Cao, and M. Lin, "Improved techniques for optimization-based jailbreaking on large language models," arXiv preprint arXiv:2405.21018, 2024. +[532] Y. Zhang and Z. Wei, "Boosting jailbreak attack with momentum," in ICASSP 2025-2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2025, pp. 1-5. +[533] Y. Zhao, W. 
Zheng, T. Cai, D. Xuan Long, K. Kawaguchi, A. Goyal, and M. Q. Shieh, "Accelerating greedy coordinate gradient and general prompt optimization via probe sampling," Advances in Neural Information Processing Systems, vol. 37, pp. 53710-53731, 2024. +[534] X. Liu, N. Xu, M. Chen, and C. Xiao, "Autodan: Generating stealthy jailbreak prompts on aligned large language models," arXiv preprint arXiv:2310.04451, 2023. +[535] S. Zhu, R. Zhang, B. An, G. Wu, J. Barrow, Z. Wang, F. Huang, A. Nenkova, and T. Sun, "Autodan: interpretable gradient-based adversarial attacks on large language models," arXiv preprint arXiv:2310.15140, 2023. +[536] A. Mehrotra, M. Zampetakis, P. Kassianik, B. Nelson, H. Anderson, Y. Singer, and A. Karbasi, "Tree of attacks: Jailbreaking black-box llms automatically," Advances in Neural Information Processing Systems, vol. 37, pp. 61-65, 2024. +[537] C. Sitawarin, N. Mu, D. Wagner, and A. Araujo, + +"Pal: Proxy-guided black-box attack on large language models," arXiv preprint arXiv:2402.09674, 2024. +[538] G. Deng, Y. Liu, Y. Li, K. Wang, Y. Zhang, Z. Li, H. Wang, T. Zhang, and Y. Liu, "Masterkey: Automated jailbreak across multiple large language model chatbots," arXiv preprint arXiv:2307.08715, 2023. +[539] X. Liu, P. Li, E. Suh, Y. Vorobeychik, Z. Mao, S. Jha, P. McDaniel, H. Sun, B. Li, and C. Xiao, "Autodanturbo: A lifelong agent for strategy self-exploration to jailbreak llms," arXiv preprint arXiv:2410.05295, 2024. +[540] Y. Liu, X. He, M. Xiong, J. Fu, S. Deng, and B. Hooi, "Flipattack: Jailbreak llms via flipping," arXiv preprint arXiv:2410.02832, 2024. +[541] T. Wu, Z. Xue, Y. Liu, J. Zhang, B. Hooi, and S.-K. Ng, "Geneshift: Impact of different scenario shift on jailbreaking llm," 2025. [Online]. Available: https://arxiv.org/abs/2504.08104 +[542] F. Perez and I. Ribeiro, "Ignore previous prompt: Attack techniques for language models," arXiv preprint arXiv:2211.09527, 2022. +[543] K. Greshake, S. Abdelnabi, S. Mishra, C. Endres, T. 
Holz, and M. Fritz, "Not what you've signed up for: Compromising real-world llm-integrated applications with indirect prompt injection," in Proceedings of the 16th ACM Workshop on Artificial Intelligence and Security, 2023, pp. 79-90. +[544] Y. Liu, G. Deng, Y. Li, K. Wang, Z. Wang, X. Wang, T. Zhang, Y. Liu, H. Wang, Y. Zheng et al., "Prompt injection attack against llm-integrated applications," arXiv preprint arXiv:2306.05499, 2023. +[545] S. Toyer, O. Watkins, E. A. Mendes, J. Svegliato, L. Bailey, T. Wang, I. Ong, K. Elmaaroufi, P. Abbeel, T. Darrell et al., "Tensor trust: Interpretable prompt injection attacks from an online game," arXiv preprint arXiv:2311.01011, 2023. +[546] J. Shi, Z. Yuan, Y. Liu, Y. Huang, P. Zhou, L. Sun, and N. Z. Gong, "Optimization-based prompt injection attack to lmm-as-a-judge," in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 660-674. +[547] X. Liu, Z. Yu, Y. Zhang, N. Zhang, and C. Xiao, "Automatic and universal prompt injection attacks against large language models," arXiv preprint arXiv:2403.04957, 2024. +[548] X. Liu, S. Jha, P. McDaniel, B. Li, and C. Xiao, "Autohijacker: Automatic indirect prompt injection against black-box llm agents." +[549] A. Al-Kaswan, M. Izadi, and A. Van Deursen, "Targeted attack on gpt-neo for the satml language model data extraction challenge," arXiv preprint arXiv:2302.07735, 2023. +[550] E. Su, A. Vellore, A. Chang, R. Mura, B. Nelson, P. Kassianik, and A. Karbasi, "Extracting memorized training data via decomposition," arXiv preprint arXiv:2409.12367, 2024. +[551] J. Huang, H. Shao, and K. C.-C. Chang, "Are large pre-trained language models leaking your personal information?" arXiv preprint arXiv:2205.12628, 2022. +[552] Z. Zhang, J. Wen, and M. Huang, "Ethicist: Targeted training data extraction through loss smoothed soft prompting and calibrated confidence estimation," + +arXiv preprint arXiv:2307.04401, 2023. +[553] K. K. Nakka, A. 
Frikha, R. Mendes, X. Jiang, and X. Zhou, "Pii-compass: Guiding llm training data extraction prompts towards the target pii via grounding," arXiv preprint arXiv:2407.02943, 2024. +[554] Z. Wang, R. Bao, Y. Wu, J. Taylor, C. Xiao, F. Zheng, W. Jiang, S. Gao, and Y. Zhang, "Unlocking memorization in large language models with dynamic soft prompting," arXiv preprint arXiv:2409.13853, 2024. +[555] J. G. Wang, J. Wang, M. Li, and S. Neel, "Pandora's white-box: Precise training data detection and extraction in large language models," arXiv preprint arXiv:2402.17012, 2024. +[556] Z. Sha and Y. Zhang, "Prompt stealing attacks against large language models," arXiv preprint arXiv:2402.12959, 2024. +[557] C. Zhang, J. X. Morris, and V. Shmatikov, "Extracting prompts by inverting llm outputs," arXiv preprint arXiv:2405.15012, 2024. +[558] Y. Yang, C. Li, Y. Jiang, X. Chen, H. Wang, X. Zhang, Z. Wang, and S. Ji, "Prsa: Prompt stealing attacks against large language models," arXiv preprint arXiv:2402.19200, 2024. +[559] Y. Zeng, H. Lin, J. Zhang, D. Yang, R. Jia, and W. Shi, "How johnny can persuade llms to jailbreak them: Rethinking persuasion to challenge ai safety by humanizing llms," in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2024, pp. 14322-14350. +[560] X. Shen, Z. Chen, M. Backes, Y. Shen, and Y. Zhang, "do anything now": Characterizing and evaluating in-the-wild jailbreak prompts on large language models," in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 1671-1685. +[561] Z. Wang, W. Xie, B. Wang, E. Wang, Z. Gui, S. Ma, and K. Chen, "Foot in the door: Understanding large language model jailbreaking via cognitive psychology," arXiv preprint arXiv:2402.15690, 2024. +[562] M. Samvelyan, S. C. Raparthy, A. Lupu, E. Hambro, A. Markosyan, M. Bhatt, Y. Mao, M. Jiang, J. Parker-Holder, J. 
Foerster et al., "Rainbow teaming: Open-ended generation of diverse adversarial prompts," Advances in Neural Information Processing Systems, vol. 37, pp. 69747-69786, 2024. +[563] H. Jin, R. Chen, A. Zhou, Y. Zhang, and H. Wang, "Guard: Role-playing to generate natural-language jailbreakings to test guideline adherence of large language models," arXiv preprint arXiv:2402.03299, 2024. +[564] Y. Yuan, W. Jiao, W. Wang, J.-t. Huang, P. He, S. Shi, and Z. Tu, "Gpt-4 is too smart to be safe: Stealthy chat with llms via cipher," arXiv preprint arXiv:2308.06463, 2023. +[565] H. Lv, X. Wang, Y. Zhang, C. Huang, S. Dou, J. Ye, T. Gui, Q. Zhang, and X. Huang, "Codechameleon: Personalized encryption framework for jailbreaking large language models," arXiv preprint arXiv:2402.16717, 2024. +[566] F. Jiang, Z. Xu, L. Niu, Z. Xiang, B. Ramasubramanian, B. Li, and R. Poovendran, "Artprompt: Ascii art-based jailbreak attacks against aligned llms," in Proceedings + +of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2024, pp. 15 157-15 173. +[567] C. Anil, E. Durmus, N. Panickssery, M. Sharma, J. Benton, S. Kundu, J. Batson, M. Tong, J. Mu, D. Ford et al., "Many-shot jailbreaking," Advances in Neural Information Processing Systems, vol. 37, pp. 129-696-129742, 2024. +[568] Z.-X. Yong, C. Menghini, and S. H. Bach, "Low-resource languages jailbreak gpt-4," arXiv preprint arXiv:2310.02446, 2023. +[569] W. Wang, Z. Tu, C. Chen, Y. Yuan, J.-T. Huang, W. Jiao, and M. R. Lyu, "All languages matter: On the multilingual safety of llms," in Annual Meeting of the Association for Computational Linguistics, 2024. [Online]. Available: https://api-semanticscholar.org/ CorpusID:271931322 +[570] Z. Wei, Y. Wang, A. Li, Y. Mo, and Y. Wang, "Jailbreak and guard aligned language models with only few in-context demonstrations," arXiv preprint arXiv:2310.06387, 2023. +[571] N. Xu, F. Wang, B. Zhou, B. Z. Li, C. Xiao, and M. 
Chen, "Cognitive overload: Jailbreaking large language models with overloaded logical thinking," arXiv preprint arXiv:2311.09827, 2023. +[572] P. Ding, J. Kuang, D. Ma, X. Cao, Y. Xian, J. Chen, and S. Huang, "A wolf in sheep's clothing: Generalized nested jailbreak prompts can fool large language models easily," arXiv preprint arXiv:2311.08268, 2023. +[573] B. Upadhayay and V. Behzadan, "Sandwich attack: Multi-language mixture adaptive attack on llms," arXiv preprint arXiv:2404.07242, 2024. +[574] D. Yao, J. Zhang, I. G. Harris, and M. Carlsson, "Fuzzllm: A novel and universal fuzzing framework for proactively discovering jailbreak vulnerabilities in large language models," in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 4485-4489. +[575] B. Li, H. Xing, C. Huang, J. Qian, H. Xiao, L. Feng, and C. Tian, "Structuralsleight: Automated jailbreak attacks on large language models utilizing uncommon text-encoded structure," arXiv e-prints, pp. arXiv-2406, 2024. +[576] A. Paulus, A. Zharmagambetov, C. Guo, B. Amos, and Y. Tian, "Advprompter: Fast adaptive adversarial prompting for llms," arXiv preprint arXiv:2404.16873, 2024. +[577] A. Wei, N. Haghtalab, and J. Steinhardt, "Jailbroken: How does llm safety training fail?" Advances in Neural Information Processing Systems, vol. 36, pp. 80079-80110, 2023. +[578] Z. Chen, Z. Zhao, W. Qu, Z. Wen, Z. Han, Z. Zhu, J. Zhang, and H. Yao, "Pandora: Detailed llm jailbreaking via collaborated phishing agents with decomposed reasoning," in ICLR 2024 Workshop on Secure and Trustworthy Large Language Models, 2024. +[579] E. Perez, S. Huang, F. Song, T. Cai, R. Ring, J. Aslanides, A. Glaese, N. McAleese, and G. Irving, "Red teaming language models with language models," arXiv preprint arXiv:2202.03286, 2022. +[580] R. Shah, S. Pour, A. Tagade, S. Casper, J. 
Rando et al., + +"Scalable and transferable black-box jailbreaks for language models via persona modulation," arXiv preprint arXiv:2311.03348, 2023. +[581] X. Guo, F. Yu, H. Zhang, L. Qin, and B. Hu, "Coldattack: Jailbreaking lms with stealthiness and controllability," arXiv preprint arXiv:2402.08679, 2024. +[582] J. Yu, H. Luo, J. Y.-C. Hu, W. Guo, H. Liu, and X. Xing, "Enhancing jailbreak attack against large language models through silent tokens," arXiv preprint arXiv:2405.20653, 2024. +[583] Z.-W. Hong, I. Shenfeld, T.-H. Wang, Y.-S. Chuang, A. Pareja, J. Glass, A. Srivastava, and P. Agrawal, "Curiosity-driven red-teaming for large language models," arXiv preprint arXiv:2402.19464, 2024. +[584] X. Zheng, T. Pang, C. Du, Q. Liu, J. Jiang, and M. Lin, "Improved few-shot jailbreaking can circumvent aligned language models and their defenses," Advances in Neural Information Processing Systems, vol. 37, pp. 32-856-32-887, 2024. +[585] Z. Xiao, Y. Yang, G. Chen, and Y. Chen, "Distract large language models for automatic jailbreak attack," arXiv preprint arXiv:2403.08424, 2024. +[586] Z. Chang, M. Li, Y. Liu, J. Wang, Q. Wang, and Y. Liu, "Play guessing game with llm: Indirect jailbreak attack with implicit clues," arXiv preprint arXiv:2402.09091, 2024. +[587] J. Yu, X. Lin, Z. Yu, and X. Xing, "Gptfuzzer: Red teaming large language models with auto-generated jailbreak prompts," arXiv preprint arXiv:2309.10253, 2023. +[588] W. Jiang, Z. Wang, J. Zhai, S. Ma, Z. Zhao, and C. Shen, "Unlocking adversarial suffix optimization without affirmative phrases: Efficient black-box jailbreaking via llm as optimizer," arXiv preprint arXiv:2408.11313, 2024. +[589] J. Zhang, Z. Wang, R. Wang, X. Ma, and Y.-G. Jiang, "Enja: Ensemble jailbreak on large language models," arXiv preprint arXiv:2408.03603, 2024. +[590] X. Zhao, X. Yang, T. Pang, C. Du, L. Li, Y.-X. Wang, and W. Y. Wang, "Weak-to-strong jailbreaking on large language models," arXiv preprint arXiv:2401.17256, 2024. 
+[591] B. Upadhayay, V. Behzadan, and A. Karbasi, "Cognitive overload attack: Prompt injection for long context," arXiv preprint arXiv:2410.11272, 2024. +[592] H. Kwon and W. Pak, "Text-based prompt injection attack using mathematical functions in modern large language models," *Electronics*, vol. 13, no. 24, p. 5008, 2024. +[593] E. Bagdasaryan, T.-Y. Hsieh, B. Nassi, and V. Shmatikov, "Abusing images and sounds for indirect instruction injection in multi-modal llms," arXiv preprint arXiv:2307.10490, 2023. +[594] D. Pasquini, M. Strohmeier, and C. Troncoso, "Neural exec: Learning (and learning from) execution triggers for prompt injection attacks," in Proceedings of the 2024 Workshop on Artificial Intelligence and Security, 2024, pp. 89-100. +[595] Z. Shao, H. Liu, J. Mu, and N. Z. Gong, "Making llms vulnerable to prompt injection via poisoning alignment," arXiv preprint arXiv:2410.14827, 2024. +[596] Y. Yang, H. Yao, B. Yang, Y. He, Y. Li, T. Zhang, + +Z. Qin, and K. Ren, "Tapi: Towards target-specific and adversarial prompt injection against code llms," arXiv preprint arXiv:2407.09164, 2024. +[597] Y. Ren, "F2a: An innovative approach for prompt injection by utilizing feign security detection agents," arXiv preprint arXiv:2410.08776, 2024. +[598] R. Pedro, D. Castro, P. Carreira, and N. Santos, "From prompt injections to sql injection attacks: How protected is your llm-integrated web application?" arXiv preprint arXiv:2308.01990, 2023. +[599] Y. Lee, T. Park, Y. Lee, J. Gong, and J. Kang, "Exploring potential prompt injection attacks in federated military Ilms and their mitigation," arXiv preprint arXiv:2501.18416, 2025. +[600] D. Lee and M. Tiwari, "Prompt infection: Llm-to-llm prompt injection within multi-agent systems," arXiv preprint arXiv:2410.07283, 2024. +[601] W. Zhang, X. Kong, C. Dewitt, T. Braunl, and J. B. 
Hong, "A study on prompt injection attack against lvm-integrated mobile robotic systems," in 2024 IEEE 35th International Symposium on Software Reliability Engineering Workshops (ISSREW). IEEE, 2024, pp. 361-368. +[602] W. Meng, Z. Guo, L. Wu, C. Gong, W. Liu, W. Li, C. Wei, and W. Chen, "Rr: Unveiling llm training privacy through recollection and ranking," arXiv preprint arXiv:2502.12658, 2025. +[603] B. Jayaraman, E. Ghosh, H. Inan, M. Chase, S. Roy, and W. Dai, "Active data pattern extraction attacks on generative language models," arXiv preprint arXiv:2207.10802, 2022. +[604] Z. Zeng, T. Xiang, S. Guo, J. He, Q. Zhang, G. Xu, and T. Zhang, "Contrast-then-approximate: Analyzing keyword leakage of generative language models," IEEE Transactions on Information Forensics and Security, 2024. +[605] C. Jiang, X. Pan, G. Hong, C. Bao, and M. Yang, "Rag-thief: Scalable extraction of private data from retrieval-augmented generation applications with agent-based attacks," arXiv preprint arXiv:2411.14110, 2024. +[606] Z. Qi, H. Zhang, E. Xing, S. Kakade, and H. Lakkaraju, "Follow my instruction and spill the beans: Scalable data extraction from retrieval-augmented generation systems," arXiv preprint arXiv:2402.17840, 2024. +[607] S. Zeng, J. Zhang, P. He, Y. Xing, Y. Liu, H. Xu, J. Ren, S. Wang, D. Yin, Y. Chang et al., "The good and the bad: Exploring privacy issues in retrieval-augmented generation (rag)," arXiv preprint arXiv:2402.16893, 2024. +[608] Y. Peng, J. Wang, H. Yu, and A. Houmansadr, "Data extraction attacks in retrieval-augmented generation via backdoors," arXiv preprint arXiv:2411.01705, 2024. +[609] A. Panda, C. A. Choquette-Choo, Z. Zhang, Y. Yang, and P. Mittal, "Teach llms to phish: Stealing private information from language models," arXiv preprint arXiv:2403.00871, 2024. +[610] L. Lu, Z. Zuo, Z. Sheng, and P. Zhou, “Merger-as-a-stealer: Stealing targeted pii from aligned llms with model merging,” arXiv preprint arXiv:2502.16094, 2025. +[611] X. Chen, S. 
Tang, R. Zhu, S. Yan, L. Jin, Z. Wang, L. Su, + +Z. Zhang, X. Wang, and H. Tang, "The janus interface: How fine-tuning in large language models amplifies the privacy risks," in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 1285-1299. +[612] R. Panchendrarajan and S. Bhoi, "Dataset reconstruction attack against language models," 2021. +[613] M. R. U. Rashid, V. A. Dasu, K. Gu, N. Sultana, and S. Mehnaz, "Fltrojan: Privacy leakage attacks against federated language models through selective weight tampering," arXiv preprint arXiv:2310.16152, 2023. +[614] J. Dentan, A. Paran, and A. Shabou, "Reconstructing training data from document understanding models," in 33rd USENIX Security Symposium (USENIX Security 24), 2024, pp. 6813-6830. +[615] J. Hósciłowicz, P. Popiołek, J. Rudkowski, J. Bieniasz, and A. Janicki, "Unconditional token forcing: Extracting text hidden within llm," in 2024 19th Conference on Computer Science and Intelligence Systems (FedCSIS). IEEE, 2024, pp. 621-624. +[616] A. Al-Kaswan, M. Izadi, and A. Van Deursen, "Traces of memorisation in large language models for code," in Proceedings of the IEEE/ACM 46th International Conference on Software Engineering, 2024, pp. 1-12. +[617] Y. Nie, C. Wang, K. Wang, G. Xu, G. Xu, and H. Wang, "Decoding secret memorization in code llms through token-level characterization," arXiv preprint arXiv:2410.08858, 2024. +[618] E. Lehman, S. Jain, K. Pichotta, Y. Goldberg, and B. C. Wallace, "Does bert pretrained on clinical notes reveal sensitive data?" arXiv preprint arXiv:2104.07762, 2021. +[619] A. Diera, N. Lell, A. Garifullina, and A. Scherp, "Memorization of named entities in fine-tuned bert models," in International Cross-Domain Conference for Machine Learning and Knowledge Extraction. Springer, 2023, pp. 258-279. +[620] R. Zhang, S. Hidano, and F. 
Koushanfar, "Text re- vealer: Private text reconstruction via model inversion attacks against transformers," arXiv preprint arXiv:2209.10505, 2022. +[621] Y. Huang, Y. Li, W. Wu, J. Zhang, and M. R. Lyu, "Your code secret belongs to me: neural code completion tools can memorize hard-coded credentials," Proceedings of the ACM on Software Engineering, vol. 1, no. FSE, pp. 2515-2537, 2024. +[622] T. Tiwari and G. E. Suh, "Sequence-level analysis of leakage risk of training data in large language models," arXiv preprint arXiv:2412.11302, 2024. +[623] H. Shao, J. Huang, S. Zheng, and K. C.-C. Chang, "Quantifying association capabilities of large language models and its implications on privacy leakage," arXiv preprint arXiv:2305.12707, 2023. +[624] Y. More, P. Ganesh, and G. Farnadi, "Towards more realistic extraction attacks: An adversarial perspective," arXiv preprint arXiv:2407.02596, 2024. +[625] R. Staab, M. Vero, M. Balunović, and M. Vechev, "Beyond memorization: Violating privacy via inference with large language models," arXiv preprint arXiv:2310.07298, 2023. +[626] H. Xu, Z. Zhang, X. Yu, Y. Wu, Z. Zha, B. Xu, W. Xu, M. Hu, and K. Peng, "Targeted training data extrac + +tion—neighborhood comparison-based membership inference attacks in large language models," Applied Sciences, vol. 14, no. 16, p. 7118, 2024. +[627] A. Karamolegkou, J. Li, L. Zhou, and A. Søgaard, "Copyright violations and large language models," arXiv preprint arXiv:2310.13771, 2023. +[628] X. Zheng, H. Han, S. Shi, Q. Fang, Z. Du, X. Hu, and Q. Guo, "Inputsnatch: Stealing input in llm services via timing side-channel attacks," arXiv preprint arXiv:2411.18191, 2024. +[629] Y. Dong, R. Mu, G. Jin, Y. Qi, J. Hu, X. Zhao, J. Meng, W. Ruan, and X. Huang, "Building guardrails for large language models," arXiv preprint arXiv:2402.01822, 2024. +[630] N. Jain, A. Schwarzschild, Y. Wen, G. Somepalli, J. Kirchenbauer, P. yeh Chiang, M. Goldblum, A. Saha, J. Geiping, and T. 
Goldstein, "Baseline defenses for adversarial attacks against aligned language models," 2024. +[631] H. Lin, Y. Lao, T. Geng, T. Yu, and W. Zhao, "Uniguardian: A unified defense for detecting prompt injection, backdoor attacks and adversarial attacks in large language models," arXiv preprint arXiv:2502.13141, 2025. +[632] Z. Hu, G. Wu, S. Mitra, R. Zhang, T. Sun, H. Huang, and V. Swaminathan, "Token-level adversarial prompt detection based on perplexity measures and contextual information," in ICLR 2025 Workshop on Building Trust in Language Models and Applications, 2025. +[633] Y. Gou, K. Chen, Z. Liu, L. Hong, H. Xu, Z. Li, D.-Y. Yeung, J. T. Kwok, and Y. Zhang, "Eyes closed, safety on: Protecting multimodal llms via image-to-text transformation," in European Conference on Computer Vision, 2024, pp. 388-404. +[634] S. Armstrong, M. Franklin, C. Stevens, and R. Gorman, "Defense against the dark prompts: Mitigating best-of-n jailbreaking with prompt evaluation," arXiv preprint arXiv:2107.03374, 2025. +[635] Y. Xie, M. Fang, R. Pi, and N. Gong, "GradSafe: Detecting jailbreak prompts for LLMs via safety-critical gradient analysis," in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), L.-W. Ku, A. Martins, and V. Srikumar, Eds., 2024, pp. 507-518. +[636] B. Peng, Z. Bi, Q. Niu, M. Liu, P. Feng, T. Wang, L. K. Yan, Y. Wen, Y. Zhang, and C. H. Yin, "Jailbreaking and mitigation of vulnerabilities in large language models," arXiv preprint arXiv:2410.15236, 2024. +[637] A. Kumar, C. Agarwal, S. Srinivas, A. J. Li, S. Feizi, and H. Lakkaraju, "Certifying LLM safety against adversarial prompting," in First Conference on Language Modeling, 2024. +[638] X. Zhang, C. Zhang, T. Li, Y. Huang, X. Jia, M. Hu, J. Zhang, Y. Liu, S. Ma, and C. Shen, "Jailguard: A universal detection framework for llm prompt-based attacks," arXiv preprint arXiv:2312.10766, 2023. +[639] Y. Liu, Y. Jia, R. Geng, J. Jia, and N. Z. 
Gong, "Formalizing and benchmarking prompt injection attacks and defenses," in Proceedings of the 33rd USENIX Conference on Security Symposium, 2024. +[640] X. Suo, "Signed-prompt: A new approach to prevent + +prompt injection attacks against llm-integrated applications," in AIP Conference Proceedings, vol. 3194, no. 1. AIP Publishing, 2024. +[641] L. Yan, Z. Zhang, G. Tao, K. Zhang, X. Chen, G. Shen, and X. Zhang, "Parafuzz: An interpretability-driven technique for detecting poisoned samples in nlp," Advances in Neural Information Processing Systems, vol. 36, pp. 66755-66767, 2023. +[642] X. Hu, P.-Y. Chen, and T.-Y. Ho, "Gradient cuff: Detecting jailbreak attacks on large language models by exploring refusal loss landscapes," in Advances in Neural Information Processing Systems, vol. 37, 2024, pp. 126-265-126-296. +[643] G. Alon and M. J. Kamfonas, "Detecting language model attacks with perplexity," 2024. +[644] J. Ji, B. Hou, A. Robey, G. J. Pappas, H. Hassani, Y. Zhang, E. Wong, and S. Chang, "Defending large language models against jailbreak attacks via semantic smoothing," CoRR, 2024. +[645] M. Phute, A. Helbling, M. Hull, S. Peng, S. Szyller, C. Cornelius, and D. H. Chau, "Llm self defense: By self examination, llms know they are being tricked," arXiv preprint arXiv:2308.07308, 2024. +[646] L. N. Candogan, Y. Wu, E. A. Rocamora, G. G. Chrysos, and V. Cevher, "Single-pass detection of jailbreaking input in large language models," arXiv preprint arXiv:2502.15435, 2025. +[647] B. Cao, Y. Cao, L. Lin, and J. Chen, “Defending against alignment-breaking attacks via robustly aligned LLM,” in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), L.-W. Ku, A. Martins, and V. Srikumar, Eds., 2024, pp. 10542-10560. +[648] Y. Zhang, L. Ding, L. Zhang, and D. Tao, "Intention analysis makes LLMs a good jailbreak defender," in Proceedings of the 31st International Conference on Computational Linguistics, 2025, pp. 
2947-2968. +[649] S. Han, K. Rao, A. Ettinger, L. Jiang, B. Y. Lin, N. Lambert, Y. Choi, and N. Dziri, "Wildguard: Open one-stop moderation tools for safety risks, jailbreaks, and refusals of llms," in The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track. +[650] M. Pisano, P. Ly, A. Sanders, B. Yao, D. Wang, T. Strzalkowski, and M. Si, "Bergeron: Combating adversarial attacks through a conscience-based alignment framework," arXiv preprint arXiv:2312.00029, 2024. +[651] A. Robey, E. Wong, H. Hassani, and G. J. Pappas, "Smoothllm: Defending large language models against jailbreaking attacks," arXiv preprint arXiv:2310.03684, 2023. +[652] J. Ji, B. Hou, Z. Zhang, G. Zhang, W. Fan, Q. Li, Y. Zhang, G. Liu, S. Liu, and S. Chang, "Advancing the robustness of large language models through self-denoised smoothing," in Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 2: Short Papers), 2024, pp. 246-257. +[653] J. Yi, Y. Xie, B. Zhu, K. Hines, E. Kiciman, G. Sun, X. Xie, and F. Wu, "Benchmarking and defending against indirect prompt injection attacks on large lan + +guage models," CoRR, 2023. +[654] X. Song, S. Duan, and G. Liu, "Alis: Aligned llm instruction security strategy for unsafe input prompt," in Proceedings of the 31st International Conference on Computational Linguistics, 2025, pp. 9124-9146. +[655] Y. Wang, Z. Shi, A. Bai, and C.-J. Hsieh, "Defending Ilms against jailbreaking attacks via backtranslation," in Findings of the Association for Computational Linguistics: ACL 2024, L.-W. Ku, A. Martins, and V. Srikumar, Eds., 2024, pp. 16031-16046. +[656] E. Zverev, S. Abdelnabi, M. Fritz, and C. H. Lampert, "Can LLMs separate instructions from data? and what do we even mean by that?" CoRR, 2024. +[657] Y. Dong, R. Mu, G. Jin, Y. Qi, J. Hu, X. Zhao, J. Meng, W. Ruan, and X. 
Huang, "Building guardrails for large language models," arXiv preprint arXiv:2402.01822, 2024. +[658] D. Kumar, Y. A. AbuHashem, and Z. Durmeric, "Watch your language: Investigating content moderation with large language models," in Proceedings of the International AAAI Conference on Web and Social Media, vol. 18, 2024, pp. 865-878. +[659] T. Rebedea, R. Dinu, M. N. Sreedhar, C. Parisien, and J. Cohen, "Nemo guardrails: A toolkit for controllable and safe llm applications with programmable rails," in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, 2023, pp. 431-445. +[660] OpenAI, "Improving model safety behavior with rule-based rewards," https://openai.com/index/improving-model-safety-behavior-with-rule-based-re 2025, accessed: 2025-03-24. +[661] H. Ma, C. Zhang, H. Fu, P. Zhao, and B. Wu, "Adapting large language models for content moderation: Pitfalls in data engineering and supervised fine-tuning," arXiv preprint arXiv:2310.03400, 2023. +[662] M. Phute, A. Helbling, M. Hull, S. Peng, S. Szyller, C. Cornelius, and D. H. Chau, "Llm self defense: By self examination, llms know they are being tricked," arXiv preprint arXiv:2308.07308, 2023. +[663] Z. Gou, Z. Shao, Y. Gong, Y. Shen, Y. Yang, N. Duan, and W. Chen, "Critic: Large language models can self-correct with tool-interactive critiquing," arXiv preprint arXiv:2305.11738, 2023. +[664] C. Lu, S. Holt, C. Fanconi, A. J. Chan, J. Foerster, M. van der Schaar, and R. T. Lange, "Discovering preference optimization algorithms with and for large language models," in Advances in Neural Information Processing Systems, vol. 37, 2024, pp. 86528-86573. +[665] A. Madaan, N. Tandon, P. Gupta, S. Hallinan, L. Gao, S. Wiegreffe, U. Alon, N. Dziri, S. Prabhumoye, Y. Yang et al., "Self-refine: Iterative refinement with self-feedback," Advances in Neural Information Processing Systems, vol. 36, pp. 46534-46594, 2023. +[666] D. Jiang, X. Ren, and B. Y. 
Lin, "Llm-blender: Ensemble large language models with pairwise ranking and generative fusion," in Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2023, pp. 14165-14178. +[667] Z. Lai, X. Zhang, and S. Chen, "Adaptive ensembles + +of fine-tuned transformers for llm-generated text detection," in 2024 International Joint Conference on Neural Networks. IEEE, 2024, pp. 1-7. +[668] C. Xiong, X. Qi, P.-Y. Chen, and T.-Y. Ho, "Defensive prompt patch: A robust and interpretable defense of llms against jailbreak attacks," arXiv preprint arXiv:2405.20099, 2024. +[669] Z. Zhang, Q. Zhang, and J. Foerster, “Parden, can you repeat that? defending against jailbreaks via repetition,” in Proceedings of the 41st International Conference on Machine Learning, 2024, pp. 60271-60287. +[670] Z. Yuan, Z. Xiong, Y. Zeng, N. Yu, R. Jia, D. Song, and B. Li, "Rigorllm: resilient guardrails for large language models against undesired content," in Proceedings of the 41st International Conference on Machine Learning, 2024, pp. 57-953-57-965. +[671] M. Cao, M. Fatemi, J. C. Cheung, and S. Shabanian, "Systematic rectification of language models via dead-end analysis," in The Eleventh International Conference on Learning Representations, 2023. +[672] F. Faal, K. Schmitt, and J. Y. Yu, "Reward modeling for mitigating toxicity in transformer-based language models," Applied Intelligence, vol. 53, no. 7, p. 8421-8435, 2022. +[673] W. Zeng, Y. Liu, R. Mullins, L. Peran, J. Fernandez, H. Harkous, K. Narasimhan, D. Proud, P. Kumar, B. Radharapu et al., "Shieldgemma: Generative ai content moderation based on gemma," arXiv preprint arXiv:2407.21772, 2024. +[674] Z. Wang, F. Yang, L. Wang, P. Zhao, H. Wang, L. Chen, *ards/, Q. Lin, and K.-F. Wong, "SELF-GUARD: Empower the LLM to safeguard itself," in *Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics*, 2024, pp. 1648-1668. 
+[675] S. Ghosh, P. Varshney, E. Galinkin, and C. Parisien, "Aegis: Online adaptive ai content safety moderation with ensemble of llm experts," arXiv preprint arXiv:2404.05993, 2024. +[676] W. Wang, J.-T. Huang, W. Wu, J. Zhang, Y. Huang, S. Li, P. He, and M. R. Lyu, "Mttm: Metamorphic testing for textual content moderation software," 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE), pp. 2387-2399, 2023. [Online]. Available: https://api-semanticscholar.org/ CorpusID:256826966 +[677] K.-L. Chiu, A. Collins, and R. Alexander, "Detecting hate speech with gpt-3," arXiv preprint arXiv:2103.12407, 2021. +[678] J. Kim, A. Derakhshan, and I. G. Harris, "Robust safety classifier for large language models: Adversarial prompt shield," arXiv preprint arXiv:2311.00172, 2023. +[679] B. Krause, A. D. Gotmare, B. McCann, N. S. Keskar, S. Joty, R. Socher, and N. F. Rajani, "Gedi: Generative discriminator guided sequence generation," in Findings of the Association for Computational Linguistics: EMNLP 2021, 2021, pp. 4929-4952. +[680] Q. Liu, Z. Zhou, L. He, Y. Liu, W. Zhang, and S. Su, "Alignment-enhanced decoding: Defending jailbreaks via token-level adaptive refining of probability distributions," in Proceedings of the 2024 Conference on + +Empirical Methods in Natural Language Processing, 2024, pp. 2802-2816. +[681] A. Liu, M. Sap, X. Lu, S. Swayamdipta, C. Bhagavatula, N. A. Smith, and Y. Choi, "Dexperts: Decoding-time controlled text generation with experts and anti-experts," in Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics, 2021, pp. 6691-6706. +[682] T. Radcliffe, E. Lockhart, and J. Wetherington, "Automated prompt engineering for semantic vulnerabilities in large language models," Authorea Preprints, 2024. +[683] F. Trad and A. Chehab, "Prompt engineering or finetuning? a case study on phishing detection with large language models," Machine Learning and Knowledge Extraction, vol. 6, no. 1, pp. 367-384, 2024. 
+[684] A. Zhou, B. Li, and H. Wang, "Robust prompt optimization for defending language models against jailbreaking attacks," in Advances in Neural Information Processing Systems, vol. 37. Curran Associates, Inc., 2024, pp. 40184-40211. +[685] Y. Mo, Y. Wang, Z. Wei, and Y. Wang, "Fight back against jailbreaking via prompt adversarial tuning," in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. +[686] Y. Zhang, L. Ding, L. Zhang, and D. Tao, "Intention analysis makes lms a good jailbreak defender," in Proceedings of the 31st International Conference on Computational Linguistics, 2025, pp. 2947-2968. +[687] Y. Chen, H. Li, Z. Zheng, Y. Song, D. Wu, and B. Hooi, "Defense against prompt injection attack by leveraging attack techniques," arXiv preprint arXiv:2411.00459, 2024. +[688] Z. Zhang, J. Yang, P. Ke, F. Mi, H. Wang, and M. Huang, "Defending large language models against jailbreaking attacks through goal prioritization," in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics, 2023, pp. 8865-8887. +[689] Y. Xie, J. Yi, J. Shao, J. Curl, L. Lyu, Q. Chen, X. Xie, and F. Wu, "Defending chatgpt against jailbreak attack via self-reminders," Nature Machine Intelligence, vol. 5, no. 12, pp. 1486–1496, 2023. +[690] S. Chen, J. Piet, C. Sitawarin, and D. Wagner, "Struq: Defending against prompt injection with structured queries," arXiv preprint arXiv:2402.06363, 2024. +[691] K. Hines, G. Lopez, M. Hall, F. Zarfati, Y. Zunger, and E. Kiciman, "Defending against indirect prompt injection attacks with spotlighting," arXiv preprint arXiv:2403.14720, 2024. +[692] S. Slocum and D. Hadfield-Menell, "Inverse prompt engineering for task-specific LLM safety," 2025. [Online]. Available: https://openreview.net/forum? id=3MDmM0rMPQ +[693] K. Edemacu and X. Wu, "Privacy preserving prompt engineering: A survey," arXiv preprint arXiv:2404.06001, 2024. +[694] S. Utpala, S. Hooker, and P.-Y. 
Chen, "Locally differentially private document generation using zero shot prompting," in Findings of the Association for Computational Linguistics: EMNLP 2023, 2023, pp. 8442-8457. +[695] H. Duan, A. Dziedzic, N. Papernot, and F. Boenisch, + +"Flocks of stochastic parrots: Differentially private prompt learning for large language models," Advances in Neural Information Processing Systems, vol. 36, pp. 76852-76871, 2023. +[696] W. Wang, W. Jiao, J. Huang, R. Dai, J.-T. Huang, Z. Tu, and M. R. Lyu, "Not all countries celebrate thanksgiving: On the cultural dominance in large language models," ArXiv, vol. abs/2310.12481, 2023. [Online]. Available: https://api_semanticscholar.org/ CorpusID:264305810 +[697] M. Kaneko, D. Bollegala, N. Okazaki, and T. Baldwin, "Evaluating gender bias in large language models via chain-of-thought prompting," arXiv preprint arXiv:2401.15585, 2024. +[698] X. He, S. Zannettou, Y. Shen, and Y. Zhang, "You only prompt once: On the capabilities of prompt learning on large language models to tackle toxic content," in 2024 IEEE Symposium on Security and Privacy (SP). IEEE, 2024, pp. 770-787. +[699] X. Zou, Y. Chen, and K. Li, "Is the system message really important to jailbreaks in large language models?" arXiv preprint arXiv:2402.14857, 2024. +[700] R. Xu, Z. Qi, and W. Xu, "Preemptive answer "attacks" on chain-of-thought reasoning," in Findings of the Association for Computational Linguistics ACL 2024, 2024, pp. 14708-14726. +[701] C. Zheng, F. Yin, H. Zhou, F. Meng, J. Zhou, K.-W. Chang, M. Huang, and N. Peng, "On prompt-driven safeguarding for large language models," in Proceedings of the 41st International Conference on Machine Learning, ser. Proceedings of Machine Learning Research, vol. 235, 21-27 Jul 2024, pp. 61-613. +[702] Y. Wang, X. Liu, Y. Li, M. Chen, and C. Xiao, "Adashield: Safeguarding multimodal large language models from structure-based attack via adaptive shield prompting," in European Conference on Computer Vision. 
Springer, 2024, pp. 77-94. +[703] Z. Shi, Z. Wang, Y. Su, W. Luo, H. Gao, F. Yang, R. Tang, and Y. Zhang, "Robustness-aware automatic prompt optimization," arXiv preprint arXiv:2412.18196, 2024. +[704] Y. Wu, Y. Gao, B. Zhu, Z. Zhou, X. Sun, S. Yang, J.-G. Lou, Z. Ding, and L. Yang, "Strago: Harnessing strategic guidance for prompt optimization," in Findings of the Association for Computational Linguistics: EMNLP 2024, 2024, pp. 10043-10061. +[705] F. Wu, N. Zhang, S. Jha, P. McDaniel, and C. Xiao, "A new era in llm security: Exploring security concerns in real-world llm-based systems," arXiv preprint arXiv:2402.18649, 2024. +[706] A. Borzunov, M. Ryabinin, A. Chumachenko, D. Baranchuk, T. Dettmers, Y. Belkada, P. Samygin, and C. A. Raffel, "Distributed inference and finetuning of large language models over the internet," Advances in neural information processing systems, vol. 36, pp. 12312-12331, 2023. +[707] A. Agrawal, N. Kedia, A. Panwar, J. Mohan, N. Kwa-tra, B. Gulavani, A. Tumanov, and R. Ramjee, "Taming {Throughput-Latency} tradeoff in {LLM} inference with {Sarathi-Serve}", in 18th USENIX Symposium on Operating Systems Design and Implementation (OSDI + +24), 2024, pp. 117-134. +[708] Y. Zhong, S. Liu, J. Chen, J. Hu, Y. Zhu, X. Liu, X. Jin, and H. Zhang, " $\{\mathrm{DistServe}\}$ : Disaggregating prefill and decoding for goodput-optimized large language model serving," in 18th USENIX Symposium on Operating Systems Design and Implementation (OSDI 24), 2024, pp. 193-210. +[709] H. Sun, Z. Chen, X. Yang, Y. Tian, and B. Chen, "Tri force: Lossless acceleration of long sequence generation with hierarchical speculative decoding," in First Conference on Language Modeling, 2024. +[710] T. Cai, Y. Li, Z. Geng, H. Peng, J. D. Lee, D. Chen, and T. Dao, "Medusa: Simple LLM inference acceleration framework with multiple decoding heads," in Proceedings of the 41st International Conference on Machine Learning, vol. 235. PMLR, 2024, pp. 5209-5235. +[711] J. Chen, V. 
Tiwari, R. Sadhukhan, Z. Chen, J. Shi, I. E.-H. Yen, and B. Chen, "Magicdec: Breaking the latency-throughput tradeoff for long context generation with speculative decoding," arXiv preprint arXiv:2408.11049, 2024. +[712] C. Holmes, M. Tanaka, M. Wyatt, A. A. Awan, J. Rasley, S. Rajbhandari, R. Y. Aminabadi, H. Qin, A. Bakhtiari, L. Kurilenko et al., "Deepspeed-fastgen: High-throughput text generation for llms via mii and deepspeed-inference," arXiv preprint arXiv:2401.08671, 2024. +[713] R. Svirschevski, A. May, Z. Chen, B. Chen, Z. Jia, and M. Ryabinin, "Specexec: Massively parallel speculative decoding for interactive lmm inference on consumer devices," Advances in Neural Information Processing Systems, vol. 37, pp. 16342-16368, 2024. +[714] P. Wang, D. Zhang, L. Li, C. Tan, X. Wang, M. Zhang, K. Ren, B. Jiang, and X. Qiu, "Inferaligner: Inference-time alignment for harmlessness through cross-model guidance," in Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, 2024, pp. 10460-10479. +[715] X. Wang, D. Wu, Z. Ji, Z. Li, P. Ma, S. Wang, Y. Li, Y. Liu, N. Liu, and J. Rahmel, "Selfdefend: Llms can defend themselves against jailbreaking in a practical manner," CoRR, 2024. +[716] X. Hu, P.-Y. Chen, and T.-Y. Ho, "Gradient cuff: Detecting jailbreak attacks on large language models by exploring refusal loss landscapes," arXiv preprint arXiv:2403.00867, 2024. +[717] R. K. Sharma, V. Gupta, and D. Grossman, "Spml: A dsl for defending language models against prompt attacks," arXiv preprint arXiv:2402.11755, 2024. +[718] J. Zhao, S. Wang, Y. Zhao, X. Hou, K. Wang, P. Gao, Y. Zhang, C. Wei, and H. Wang, "Models are codes: Towards measuring malicious code poisoning attacks on pre-trained model hubs," in Proceedings of the 39th IEEE/ACM International Conference on Automated Software Engineering, 2024, pp. 2087-2098. +[719] S. Ghosh, P. Varshney, E. Galinkin, and C. 
Parisien, "Aegis: Online adaptive ai content safety moderation with ensemble of llm experts," arXiv preprint arXiv:2404.05993, 2024. +[720] S. Ghosh, P. Varshney, M. N. Sreedhar, A. Padmakumar, T. Rebedea, J. R. Varghese, and C. Parisien, + +"Aegis2.0: A diverse ai safety dataset and risks taxonomy for alignment of llm guardrails," in Neurips Safe Generative AI Workshop 2024, 2024. +[721] S. Han, K. Rao, A. Ettinger, L. Jiang, B. Y. Lin, N. Lambert, Y. Choi, and N. Dziri, "Wildguard: Open one-stop moderation tools for safety risks, jailbreaks, and refusals of llms," arXiv preprint arXiv:2406.18495, 2024. +[722] W. Zeng, Y. Liu, R. Mullins, L. Peran, J. Fernandez, H. Harkous, K. Narasimhan, D. Proud, P. Kumar, B. Radharapu et al., "Shieldgemma: Generative ai content moderation based on gemma," arXiv preprint arXiv:2407.21772, 2024. +[723] Y. Liu, H. Gao, S. Zhai, J. Xia, T. Wu, Z. Xue, Y. Chen, K. Kawaguchi, J. Zhang, and B. Hooi, "Guardreasoner: Towards reasoning-based llm safeguards," arXiv preprint arXiv:2501.18492, 2025. +[724] C. Wang, Y. Liu, B. Li, D. Zhang, Z. Li, and J. Fang, "Safety in large reasoning models: A survey," arXiv preprint arXiv:2504.17704, 2025. +[725] H. Jin, A. Zhou, J. Menke, and H. Wang, "Jailbreaking large language models against moderation guardrails via cipher characters," Advances in Neural Information Processing Systems, vol. 37, pp. 59408-59435, 2024. +[726] D. Ran, J. Liu, Y. Gong, J. Zheng, X. He, T. Cong, and A. Wang, "Jailbreak: An integrated toolkit for evaluating jailbreak attempts against large language models," arXiv preprint arXiv:2406.09321, 2024. +[727] H. Qiu, S. Zhang, A. Li, H. He, and Z. Lan, "Latent jailbreak: A benchmark for evaluating text safety and output robustness of large language models," arXiv preprint arXiv:2307.08487, 2023. +[728] K. Zhu, J. Wang, J. Zhou, Z. Wang, H. Chen, Y. Wang, L. Yang, W. Ye, Y. Zhang, N. 
Gong et al., "Promptrobust: Towards evaluating the robustness of large language models on adversarial prompts," in Proceedings of the 1st ACM Workshop on Large AI Systems and Models with Privacy and Safety Analysis, 2023, pp. 57-68. +[729] A. Pei, Z. Yang, S. Zhu, R. Cheng, and J. Jia, "Selfprompt: Autonomously evaluating llm robustness via domain-constrained knowledge guidelines and refined adversarial prompts," arXiv preprint arXiv:2412.00765, 2024. +[730] Z. Xu, Y. Liu, G. Deng, Y. Li, and S. Picek, "A comprehensive study of jailbreak attack versus defense for large language models," arXiv preprint arXiv:2402.13457, 2024. +[731] K. Chen, Y. Liu, D. Wang, J. Chen, and W. Wang, "Characterizing and evaluating the reliability of llms against jailbreak attacks," arXiv preprint arXiv:2408.09326, 2024. +[732] B. Wang, C. Xu, S. Wang, Z. Gan, Y. Cheng, J. Gao, A. H. Awadallah, and B. Li, "Adversarial glue: A multi-task benchmark for robustness evaluation of language models," arXiv preprint arXiv:2111.02840, 2021. +[733] G. Dong, J. Zhao, T. Hui, D. Guo, W. Wang, B. Feng, Y. Qiu, Z. Gongque, K. He, Z. Wang et al., "Revisit input perturbation problems for llms: A unified robustness evaluation framework for noisy slot filling task," in CCF International Conference on Natural Language Processing and Chinese Computing. Springer, + +2023, pp. 682-694. +[734] J. Zheng, A. Ritter, and W. Xu, "Neo-bench: Evaluating robustness of large language models with neologisms," arXiv preprint arXiv:2402.12261, 2024. +[735] Y. Li, Y. Guo, F. Guerin, and C. Lin, "Evaluating large language models for generalization and robustness via data compression," arXiv preprint arXiv:2402.00861, 2024. +[736] Q. Zhang, H. Qiu, D. Wang, Y. Li, T. Zhang, W. Zhu, H. Weng, L. Yan, and C. Zhang, “A benchmark for semantic sensitive information in llms outputs,” in The Thirteenth International Conference on Learning Representations. +[737] A. Wang, A. Singh, J. Michael, F. Hill, O. Levy, and S. R. 
Bowman, "Glue: A multi-task benchmark and analysis platform for natural language understanding," arXiv preprint arXiv:1804.07461, 2018. +[738] J. Li, X. Cheng, W. X. Zhao, J.-Y. Nie, and J.-R. Wen, "Halueval: A large-scale hallucination evaluation benchmark for large language models," arXiv preprint arXiv:2305.11747, 2023. +[739] A. Pal, L. K. Umapathi, and M. Sankarasubbu, "Med-halt: Medical domain hallucination test for large language models," arXiv preprint arXiv:2307.15343, 2023. +[740] Z. Ji, Y. Gu, W. Zhang, C. Lyu, D. Lin, and K. Chen, "Anah: Analytical annotation of hallucinations in large language models," arXiv preprint arXiv:2405.20315, 2024. +[741] P. Manakul, A. Liusie, and M. J. Gales, "Selfcheck-gpt: Zero-resource black-box hallucination detection for generative large language models," arXiv preprint arXiv:2303.08896, 2023. +[742] Y.-S. Chuang, Y. Xie, H. Luo, Y. Kim, J. Glass, and P. He, "Dola: Decoding by contrasting layers improves factuality in large language models," arXiv preprint arXiv:2309.03883, 2023. +[743] N. Mündler, J. He, S. Jenko, and M. Vechev, "Self-contradictory hallucinations of large language models: Evaluation, detection and mitigation," arXiv preprint arXiv:2305.15852, 2023. +[744] M. Elaraby, M. Lu, J. Dunn, X. Zhang, Y. Wang, S. Liu, P. Tian, Y. Wang, and Y. Wang, "Halo: Estimation and reduction of hallucinations in open-source weak large language models," arXiv preprint arXiv:2308.11764, 2023. +[745] Z. Ji, D. Chen, E. Ishii, S. Cahyawijaya, Y. Bang, B. Wilie, and P. Fung, "Llm internal states reveal hallucination risk faced with a query," arXiv preprint arXiv:2407.03282, 2024. +[746] J. Wei, Y. Yao, J.-F. Ton, H. Guo, A. Estornell, and Y. Liu, "Measuring and reducing llm hallucination without gold-standard answers," arXiv preprint arXiv:2402.10412, 2024. +[747] A. Deshpande, V. Murahari, T. Rajpurohit, A. Kalyan, and K. 
Narasimhan, "Toxicity in chatgpt: Analyzing persona-assigned language models," arXiv preprint arXiv:2304.05335, 2023. +[748] A. de Wynter, I. Watts, T. Wongsangaroonsri, M. Zhang, N. Farra, N. E. Altintoprak, L. Baur, S. Claudet, P. Gajdusek, C. Gören et al., "Rtp-lx: Can llms evaluate toxicity in multilingual scenarios?" + +arXiv preprint arXiv:2404.14397, 2024. +[749] D. Esiobu, X. Tan, S. Hosseini, M. Ung, Y. Zhang, J. Fernandes, J. Dwivedi-Yu, E. Presani, A. Williams, and E. M. Smith, "Robbie: Robust bias evaluation of large generative language models," arXiv preprint arXiv:2311.18140, 2023. +[750] S. Wang, P. Wang, T. Zhou, Y. Dong, Z. Tan, and J. Li, "Ceb: Compositional evaluation benchmark for fairness in large language models," arXiv preprint arXiv:2407.02408, 2024. +[751] H. Li, D. Guo, D. Li, W. Fan, Q. Hu, X. Liu, C. Chan, D. Yao, Y. Yao, and Y. Song, "Privlm-bench: A multi-level privacy evaluation benchmark for language models," arXiv preprint arXiv:2311.04044, 2023. +[752] Q. Li, J. Hong, C. Xie, J. Tan, R. Xin, J. Hou, X. Yin, Z. Wang, D. Hendrycks, Z. Wang et al., "Llm-pbe: Assessing data privacy in large language models," arXiv preprint arXiv:2408.12787, 2024. +[753] D. Zhu, D. Chen, X. Wu, J. Geng, Z. Li, J. Grossklags, and L. Ma, "Privauditor: Benchmarking data protection vulnerabilities in llm adaptation techniques," Advances in Neural Information Processing Systems, vol. 37, pp. 9668-9689, 2024. +[754] L. Rossi, B. Marek, V. Hanke, X. Wang, M. Backes, A. Dziedzic, and F. Boenisch, "Auditing empirical privacy protection of private llm adaptations," in Neurips Safe Generative AI Workshop 2024. +[755] T. Singh, H. Aditya, V. K. Madisetti, and A. Bahga, "Whispered tuning: Data privacy preservation in finetuning llms through differential privacy," Journal of Software Engineering and Applications, vol. 17, no. 1, pp. 1-22, 2024. +[756] H. Li, W. Hu, H. Jing, Y. Chen, Q. Hu, S. Han, T. Chu, P. Hu, and Y. 
Song, "Privaci-bench: Evaluating privacy with contextual integrity and legal compliance," arXiv preprint arXiv:2502.17041, 2025. +[757] O. Cartwright, H. Dunbar, and T. Radcliffe, “Evaluating privacy compliance in commercial large language models-chatgpt, claude, and gemini,” 2024. +[758] X. Zhou, M. Weyssow, R. Widyasari, T. Zhang, J. He, Y. Lyu, J. Chang, B. Zhang, D. Huang, and D. Lo, "Lessleak-bench: A first investigation of data leakage in llms across 83 software engineering benchmarks," arXiv preprint arXiv:2502.06215, 2025. +[759] Y. Song, R. Liu, S. Chen, Q. Ren, Y. Zhang, and Y. Yu, "Securesql: Evaluating data leakage of large language models as natural language interfaces to databases," in Findings of the Association for Computational Linguistics: EMNLP 2024, 2024, pp. 5975-5990. +[760] X. Liu, Y. Zhu, J. Gu, Y. Lan, C. Yang, and Y. Qiao, "Mm-safetybench: A benchmark for safety evaluation of multimodal large language models," in European Conference on Computer Vision. Springer, 2024, pp. 386-403. +[761] W. Luo, S. Ma, X. Liu, X. Guo, and C. Xiao, "Jailbreakv-28k: A benchmark for assessing the robustness of multimodal large language models against jailbreak attacks," arXiv e-prints, pp. arXiv-2404, 2024. +[762] F. Weng, Y. Xu, C. Fu, and W. Wang, "A comprehensive study on jailbreak attacks and defenses for + +multimodal large language models," arXiv preprint arXiv:2408.08464, 2024. +[763] Z. Li, P.-Y. Chen, and T.-Y. Ho, "Retention score: Quantifying jailbreak risks for vision language models," arXiv preprint arXiv:2412.17544, 2024. +[764] T. Guan, F. Liu, X. Wu, R. Xian, Z. Li, X. Liu, X. Wang, L. Chen, F. Huang, Y. Yacoob et al., "Hallusionbench: an advanced diagnostic suite for entangled language hallucination and visual illusion in large vision-language models," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 14375-14385. +[765] Y. Li, Y. Du, K. Zhou, J. Wang, W. X. Zhao, and J.-R. 
Wen, "Evaluating object hallucination in large vision-language models," arXiv preprint arXiv:2305.10355, 2023. +[766] C. Cui, Y. Zhou, X. Yang, S. Wu, L. Zhang, J. Zou, and H. Yao, “Holistic analysis of hallucination in gpt-4v (ision): Bias and interference challenges,” arXiv preprint arXiv:2311.03287, 2023. +[767] S. Wang, X. Ye, Q. Cheng, J. Duan, S. Li, J. Fu, X. Qiu, and X. Huang, "Cross-modality safety alignment," arXiv preprint arXiv:2406.15279, 2024. +[768] A. Agarwal, S. Panda, A. Charles, B. Kumar, H. Patel, P. Pattnayak, T. H. Rafi, T. Kumar, and D.-K. Chae, "Mvtamperbench: Evaluating robustness of vision-language models," arXiv preprint arXiv:2412.19794, 2024. +[769] H. Zhang, W. Shao, H. Liu, Y. Ma, P. Luo, Y. Qiao, and K. Zhang, "Avibench: Towards evaluating the robustness of large vision-language model on adversarial visual-instructions," arXiv e-prints, pp. arXiv-2403, 2024. +[770] Z. Hu, Y. Ren, J. Li, and Y. Yin, "Viva: A benchmark for vision-grounded decision-making with human values," arXiv preprint arXiv:2407.03000, 2024. +[771] Y. Xiao, A. Liu, Q. Cheng, Z. Yin, S. Liang, J. Li, J. Shao, X. Liu, and D. Tao, "Genderbias- $\cdot$ emph {VL}: Benchmarking gender bias in vision language models via counterfactual probing," arXiv preprint arXiv:2407.00600, 2024. +[772] L. Gustafson, C. Rolland, N. Ravi, Q. Duval, A. Adcock, C.-Y. Fu, M. Hall, and C. Ross, "Facet: Fairness in computer vision evaluation benchmark," in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023, pp. 20370-20382. +[773] E. Slyman, S. Lee, S. Cohen, and K. Kafle, "Fairdedup: Detecting and mitigating vision-language fairness disparities in semantic dataset dedduplication," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 13905-13916. +[774] Y. Zhang, J. Wang, and J. 
Sang, "Counterfactually measuring and eliminating social bias in vision-language pre-training models," in Proceedings of the 30th ACM International Conference on Multimedia, 2022, pp. 4996-5004. +[775] K. C. Fraser and S. Kiritchenko, "Examining gender and racial bias in large vision-language models using a novel dataset of parallel images," arXiv preprint arXiv:2402.05779, 2024. +[776] A. Seth, M. Hemani, and C. Agarwal, "Dear: Debias + +ing vision-language models with additive residuals," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 6820-6829. +[777] S. Janghorbani and G. De Melo, "Multimodal bias: Introducing a framework for stereotypical bias assessment beyond gender and race in vision language models," arXiv preprint arXiv:2303.12734, 2023. +[778] Y. Zhang, Y. Huang, Y. Sun, C. Liu, Z. Zhao, Z. Fang, Y. Wang, H. Chen, X. Yang, X. Wei et al., "Benchmarking trustworthiness of multimodal large language models: A comprehensive study," arXiv preprint arXiv:2406.07057, 2024. +[779] Y. Zhang, L. Chen, G. Zheng, Y. Gao, R. Zheng, J. Fu, Z. Yin, S. Jin, Y. Qiao, X. Huang et al., "Spa-vl: A comprehensive safety preference alignment dataset for vision language model," arXiv preprint arXiv:2406.12030, 2024. +[780] Z. Zhang, T. Kou, S. Wang, C. Li, W. Sun, W. Wang, X. Li, Z. Wang, X. Cao, X. Min et al., "Q-eval-100k: Evaluating visual quality and alignment level for text-to-vision content," arXiv preprint arXiv:2503.02357, 2025. +[781] W. Wang, X. Liu, K. Gao, J.-T. Huang, Y. Yuan, P. He, S. Wang, and Z. Tu, "Can't see the forest for the trees: Benchmarking multimodal safety awareness for multimodal llms," ArXiv, vol. abs/2502.11184, 2025. [Online]. Available: https://api.sementicscholar.org/CorpusID:276409442 +[782] W. Wang, K. Gao, Z. Jia, Y. Yuan, J.-T. Huang, Q. Liu, S. Wang, W. Jiao, and Z. Tu, "Chain-of-jailbreak attack for image generation models via editing step by step," ArXiv, vol. abs/2410.03869, 2024. [Online]. 
Available: https://api_semanticscholar.org/ CorpusID:273186566 +[783] H. Naveed, A. U. Khan, S. Qiu, M. Saqib, S. Anwar, M. Usman, N. Akhtar, N. Barnes, and A. Mian, "A comprehensive overview of large language models," arXiv preprint arXiv:2307.06435, 2023. +[784] W. Zhao, Y. Hu, Y. Deng, J. Guo, X. Sui, X. Han, A. Zhang, Y. Zhao, B. Qin, T.-S. Chua et al., "Beware of your po! measuring and mitigating ai safety risks in role-play fine-tuning of llms," arXiv preprint arXiv:2502.20968, 2025. +[785] B. Liu, X. Li, J. Zhang, J. Wang, T. He, S. Hong, H. Liu, S. Zhang, K. Song, K. Zhu et al., "Advances and challenges in foundation agents: From brain-inspired intelligence to evolutionary, collaborative, and safe systems," arXiv preprint arXiv:2504.01990, 2025. +[786] H. Jin, L. Huang, H. Cai, J. Yan, B. Li, and H. Chen, "From llms to llm-based agents for software engineering: A survey of current, challenges and future," arXiv preprint arXiv:2408.02479, 2024. +[787] J. Piao, Y. Yan, J. Zhang, N. Li, J. Yan, X. Lan, Z. Lu, Z. Zheng, J. Y. Wang, D. Zhou et al., "Agentsociety: Large-scale simulation of llm-driven generative agents advances understanding of human behaviors and society," arXiv preprint arXiv:2502.08691, 2025. +[788] Y. Yan, S. Wang, J. Huo, P. S. Yu, X. Hu, and Q. Wen, "Mathagent: Leveraging a mixture-of-math-agent framework for real-world multimodal mathematical error detection," arXiv preprint arXiv:2503.18132, 2025. + +[789] H. Wang, A. Zhang, N. Duy Tai, J. Sun, T.-S. Chua et al., "Ali-agent: Assessing llms' alignment with human values via agent-based evaluation," Advances in Neural Information Processing Systems, vol. 37, pp. 99040-99088, 2024. +[790] K. Zhang, J. Li, G. Li, X. Shi, and Z. Jin, "Codeagent: Enhancing code generation with tool-integrated agent systems for real-world repo-level coding challenges," arXiv preprint arXiv:2401.07339, 2024. +[791] Y. Shen, K. Song, X. Tan, D. Li, W. Lu, and Y. 
Zhuang, "Hugginggpt: Solving ai tasks with chatgpt and its friends in hugging face," Advances in Neural Information Processing Systems, vol. 36, pp. 38154-38180, 2023. +[792] Z. Chu, S. Wang, J. Xie, T. Zhu, Y. Yan, J. Ye, A. Zhong, X. Hu, J. Liang, P. S. Yu et al., "Llm agents for education: Advances and applications," arXiv preprint arXiv:2503.11733, 2025. +[793] W. Zhang, Y. Shen, W. Lu, and Y. Zhuang, "Data-copilot: Bridging billions of data and humans with autonomous workflow," arXiv preprint arXiv:2306.07209, 2023. +[794] W. Xu, Z. Liang, K. Mei, H. Gao, J. Tan, and Y. Zhang, "A-mem: Agentic memory for llm agents," arXiv preprint arXiv:2502.12110, 2025. +[795] Y. Shang, Y. Li, K. Zhao, L. Ma, J. Liu, F. Xu, and Y. Li, "Agentsquare: Automatic llm agent search in modular design space," arXiv preprint arXiv:2410.06153, 2024. +[796] J. Yang, C. Jimenez, A. Wettig, K. Lieret, S. Yao, K. Narasimhan, and O. Press, "Swe-agent: Agent-computer interfaces enable automated software engineering," Advances in Neural Information Processing Systems, vol. 37, pp. 50528-50652, 2024. +[797] S. Agashe, J. Han, S. Gan, J. Yang, A. Li, and X. E. Wang, "Agent s: An open agentic framework that uses computers like a human," arXiv preprint arXiv:2410.08164, 2024. +[798] S. Hao, Y. Gu, H. Ma, J. J. Hong, Z. Wang, D. Z. Wang, and Z. Hu, "Reasoning with language model is planning with world model," arXiv preprint arXiv:2305.14992, 2023. +[799] J. Hong, J. Lin, A. Dragan, and S. Levine, "Interactive dialogue agents via reinforcement learning on hindsight regenerations," arXiv preprint arXiv:2411.05194, 2024. +[800] J. Tang, T. Fan, and C. Huang, "Autoagent: A fully-automated and zero-code framework for llm agents," arXiv e-prints, pp. arXiv-2502, 2025. +[801] G. Li, H. Hammoud, H. Itani, D. Khizbullin, and B. Ghanem, "Camel: Communicative agents for" mind" exploration of large language model society," Advances in Neural Information Processing Systems, vol. 36, pp. 51991-52008, 2023. 
+[802] S. Yuan, K. Song, J. Chen, X. Tan, D. Li, and D. Yang, "Evoagent: Towards automatic multi-agent generation via evolutionary algorithms," arXiv preprint arXiv:2406.14228, 2024. +[803] M. Zhuge, W. Wang, L. Kirsch, F. Faccio, D. Khizbullin, and J. Schmidhuber, "Language agents as estimizable graphs," arXiv preprint arXiv:2402.16823, 2024. +[804] Y. Wang, T. Shen, L. Liu, and J. Xie, "Sibyl: Simple + +yet effective agent framework for complex real-world reasoning," arXiv preprint arXiv:2407.10718, 2024. +[805] Z. Wang, X. Zeng, W. Liu, L. Li, Y. Wang, L. Shang, X. Jiang, Q. Liu, and K.-F. Wong, "Toolflow: Boosting llm tool-calling through natural and coherent dialogue synthesis," arXiv preprint arXiv:2410.18447, 2024. +[806] F. Wu, S. Wu, Y. Cao, and C. Xiao, "Wipi: A new web threat for llm-driven web agents," arXiv preprint arXiv:2402.16965, 2024. +[807] S. S. Kannan, V. L. Venkatesh, and B.-C. Min, "Smartllm: Smart multi-agent robot task planning using large language models," in 2024 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2024, pp. 12140-12147. +[808] R. Fang, R. Bindu, A. Gupta, and D. Kang, "Llm agents can autonomously exploit one-day vulnerabilities," arXiv preprint arXiv:2404.08144, vol. 13, p. 14, 2024. +[809] R. Fang, R. Bindu, A. Gupta, Q. Zhan, and D. Kang, "Llm agents can autonomously hack websites," arXiv preprint arXiv:2402.06664, 2024. +[810] W. Cheng, K. Sun, X. Zhang, and W. Wang, "Security attacks on llm-based code completion tools," arXiv preprint arXiv:2408.11006, 2024. +[811] X. Fu, Z. Wang, S. Li, R. K. Gupta, N. Mireshghallah, T. Berg-Kirkpatrick, and E. Fernandes, "Misusing tools in large language models with visual adversarial examples," arXiv preprint arXiv:2310.03185, 2023. +[812] X. Fu, S. Li, Z. Wang, Y. Liu, R. K. Gupta, T. Berg-Kirkpatrick, and E. Fernandes, "Imprompter: Tricking llm agents into improper tool use," arXiv preprint arXiv:2410.14923, 2024. +[813] B. Zhang, Y. Tan, Y. Shen, A. 
Salem, M. Backes, S. Zannettou, and Y. Zhang, "Breaking agents: Compromising autonomous llm agents through malfunction amplification," arXiv preprint arXiv:2407.20859, 2024. +[814] H. Wang, R. Zhang, J. Wang, M. Li, Y. Huang, D. Wang, and Q. Wang, "From allies to adversaries: Manipulating llm tool-calling through adversarial injection," arXiv preprint arXiv:2412.10198, 2024. +[815] W. Yang, X. Bi, Y. Lin, S. Chen, J. Zhou, and X. Sun, "Watch out for your agents! investigating backdoor threats to lvm-based agents," Advances in Neural Information Processing Systems, vol. 37, pp. 100938-100964, 2024. +[816] P. Zhu, Z. Zhou, Y. Zhang, S. Yan, K. Wang, and S. Su, "Demonagent: Dynamically encrypted multi-backdoor implantation attack on llm-based agent," arXiv preprint arXiv:2502.12575, 2025. +[817] Y. Wang, D. Xue, S. Zhang, and S. Qian, "Badagent: Inserting and activating backdoor attacks in llm agents," arXiv preprint arXiv:2406.03007, 2024. +[818] Z. Jiang, M. Li, G. Yang, J. Wang, Y. Huang, Z. Chang, and Q. Wang, "Mimicking the familiar: Dynamic command generation for information theft attacks in llm tool-learning system," arXiv preprint arXiv:2502.11358, 2025. +[819] W. Zhao, V. Khazanchi, H. Xing, X. He, Q. Xu, and N. D. Lane, "Attacks on third-party apis of large language models," arXiv preprint arXiv:2404.16891, 2024. +[820] J. Chen and S. L. Cong, "Agentguard: Repurposing + +agentric orchestrator for safety evaluation of tool orchestration," arXiv preprint arXiv:2502.09809, 2025. +[821] X. Zhang, H. Xu, Z. Ba, Z. Wang, Y. Hong, J. Liu, Z. Qin, and K. Ren, "Privacyasst: Safeguarding user privacy in tool-using large language model agents," IEEE Transactions on Dependable and Secure Computing, 2024. +[822] Z. Xiang, L. Zheng, Y. Li, J. Hong, Q. Li, H. Xie, J. Zhang, Z. Xiong, C. Xie, C. Yang et al., "Guardagent: Safeguard llm agents by a guard agent via knowledge-enabled reasoning," arXiv preprint arXiv:2406.09187, 2024. +[823] Y. Gao, Y. Xiong, X. Gao, K. Jia, J. 
Pan, Y. Bi, Y. Dai, J. Sun, H. Wang, and H. Wang, "Retrieval-augmented generation for large language models: A survey," arXiv preprint arXiv:2312.10997, vol. 2, 2023. +[824] P. Zhao, H. Zhang, Q. Yu, Z. Wang, Y. Geng, F. Fu, L. Yang, W. Zhang, J. Jiang, and B. Cui, "Retrievalaugmented generation for ai-generated content: A survey," arXiv preprint arXiv:2402.19473, 2024. +[825] C. Xiang, T. Wu, Z. Zhong, D. Wagner, D. Chen, and P. Mittal, "Certifiably robust rag against retrieval corruption," arXiv preprint arXiv:2405.15556, 2024. +[826] Z. Chen, Z. Xiang, C. Xiao, D. Song, and B. Li, "Agentpoison: Red-teaming llm agents via poisoning memory or knowledge bases," Advances in Neural Information Processing Systems, vol. 37, pp. 130-185-130-213, 2025. +[827] W. Zou, R. Geng, B. Wang, and J. Jia, "Poisonedrag: Knowledge corruption attacks to retrieval-augmented generation of large language models," arXiv preprint arXiv:2402.07867, 2024. +[828] Z. Zhong, Z. Huang, A. Wettig, and D. Chen, "Poisoning retrieval corpora by injecting adversarial passages," arXiv preprint arXiv:2310.19156, 2023. +[829] X. Gu, X. Zheng, T. Pang, C. Du, Q. Liu, Y. Wang, J. Jiang, and M. Lin, "Agent smith: A single image can jailbreak one million multimodal llm agents exponentially fast," arXiv preprint arXiv:2402.08567, 2024. +[830] A. Li, Y. Zhou, V. C. Raghuram, T. Goldstein, and M. Goldblum, "Commercial llm agents are already vulnerable to simple yet dangerous attacks," arXiv preprint arXiv:2502.08586, 2025. +[831] H. Li, M. Xu, and Y. Song, "Sentence embedding leaks more information than you expect: Generative embedding inversion attack to recover the whole sentence," arXiv preprint arXiv:2305.03010, 2023. +[832] M. Russinovich, A. Salem, and R. Eldan, "Great, now write an article about that: The crescendo multi-turn llm jailbreak attack," arXiv preprint arXiv:2404.01833, 2024. +[833] Y. Cheng, M. Georgopoulos, V. Cevher, and G. G. 
Chrysos, "Leveraging the context through multiround interactions for jailbreaking attacks," arXiv preprint arXiv:2402.09177, 2024. +[834] A. Priyanshu and S. Vijay, "Fractured-sorry-bench: Framework for revealing attacks in conversational turns undermining refusal efficacy and defenses over sorry-bench (automated multi-shot jailbreaks)," arXiv preprint arXiv:2408.16163, 2024. +[835] D. Agarwal, A. R. Fabbri, B. Risher, P. Laban, + +S. Joty, and C.-S. Wu, "Prompt leakage effect and defense strategies for multi-turn llm interactions," arXiv preprint arXiv:2404.16251, 2024. +[836] T. Tong, J. Xu, Q. Liu, and M. Chen, "Securing multi-turn conversational language models from distributed backdoor triggers," arXiv preprint arXiv:2407.04151, 2024. +[837] J. Mao, F. Meng, Y. Duan, M. Yu, X. Jia, J. Fang, Y. Liang, K. Wang, and Q. Wen, "Agentsafe: Safeguarding large language model-based multi-agent systems via hierarchical data management," arXiv preprint arXiv:2503.04392, 2025. +[838] H. Zhou, K.-H. Lee, Z. Zhan, Y. Chen, and Z. Li, "Trustrag: Enhancing robustness and trustworthiness in rag," arXiv preprint arXiv:2501.00879, 2025. +[839] X. Xian, G. Wang, X. Bi, J. Srinivasa, A. Kundu, C. Fleming, M. Hong, and J. Ding, "On the vulnerability of applying retrieval-augmented generation within knowledge-intensive application domains," arXiv preprint arXiv:2409.17275, 2024. +[840] B. Chen, G. Wang, H. Guo, Y. Wang, and Q. Yan, "Understanding multi-turn toxic behaviors in open-domain chatbots," in Proceedings of the 26th International Symposium on Research in Attacks, Intrusions and Defenses, 2023, pp. 282-296. +[841] R. Song, M. O. Ozmen, H. Kim, A. Bianchi, and Z. B. Celik, "Enhancing llm-based autonomous driving agents to mitigate perception attacks," arXiv preprint arXiv:2409.14488, 2024. +[842] C. H. Low, Z. Wang, T. Zhang, Z. Zeng, Z. Zhuo, E. B. Mazomenos, and Y. 
Jin, "Surgraw: Multi-agent workflow with chain-of-thought reasoning for surgical intelligence," arXiv preprint arXiv:2503.10265, 2025. +[843] Z. Wang, J. Wu, C. H. Low, and Y. Jin, "Medagent-pro: Towards multi-modal evidence-based medical diagnosis via reasoning agentic workflow," arXiv preprint arXiv:2503.18968, 2025. +[844] K. N. Jeptoo and C. Sun, "Enhancing fake news detection with large language models through multi-agent debates," in CCF International Conference on Natural Language Processing and Chinese Computing. Springer, 2024, pp. 474-486. +[845] T. Park, "Enhancing anomaly detection in financial markets with an llm-based multi-agent framework," arXiv preprint arXiv:2403.19735, 2024. +[846] Z. Yang, S. S. Raman, A. Shah, and S. Tellex, "Plug in the safety chip: Enforcing constraints for llm-driven robot agents," in 2024 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2024, pp. 14435-14442. +[847] J. Zhang, C. Xu, and B. Li, "Chatscene: Knowledge-enabled safety-critical scenario generation for autonomous vehicles," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 15459-15469. +[848] T. Abuelsaad, D. Akkil, P. Dey, A. Jagmohan, A. Vempaty, and R. Kokku, "Agent-e: From autonomous web navigation to foundational design principles in agenti-tic systems," arXiv preprint arXiv:2407.13032, 2024. +[849] E. Debenedetti, J. Zhang, M. Balunović, L. Beurer-Kellner, M. Fischer, and F. Tramère, "Agentdojo: A dy + +namic environment to evaluate attacks and defenses for llm agents," arXiv preprint arXiv:2406.13352, 2024. +[850] Y. Sun, N. Salami Pargoo, P. Jin, and J. Ortiz, "Optimizing autonomous driving for safety: A human-centric approach with lvm-enhanced rlhf," in Companion of the 2024 on ACM International Joint Conference on Pervasive and Ubiquitous Computing, 2024, pp. 76-80. +[851] R. Fang, R. Bindu, A. Gupta, and D. 
Kang, "Llm agents can autonomously exploit one-day vulnerabilities," arXiv preprint arXiv:2404.08144, vol. 13, p. 14, 2024. +[852] Y. H. Ke, R. Yang, S. A. Lie, T. X. Y. Lim, H. R. Abdullah, D. S. W. Ting, and N. Liu, "Enhancing diagnostic accuracy through multi-agent conversations: using large language models to mitigate cognitive bias," arXiv preprint arXiv:2401.14589, 2024. +[853] X. Mou, Z. Wei, and X. Huang, "Unveiling the truth and facilitating change: Towards agent-based largescale social movement simulation," arXiv preprint arXiv:2402.16333, 2024. +[854] Z. Chen, J. Chen, J. Chen, and M. Sra, "Position: Standard benchmarks fail-llm agents present overlooked risks for financial applications," arXiv preprint arXiv:2502.15865, 2025. +[855] Z. Liu, R. Zeng, D. Wang, G. Peng, J. Wang, Q. Liu, P. Liu, and W. Wang, "Agents4plc: Automating closed-loop plc code generation and verification in industrial control systems using llm-based agents," arXiv preprint arXiv:2410.14209, 2024. +[856] S. Mukherjee, P. Gamble, M. S. Ausin, N. Kant, K. Aggarwal, N. Manjunath, D. Datta, Z. Liu, J. Ding, S. Busacca et al., "Polaris: A safety-focused llm constellation architecture for healthcare," arXiv preprint arXiv:2403.13313, 2024. +[857] L. La Cava and A. Tagarelli, "Safeguarding decentralized social media: Llm agents for automating community rule compliance," arXiv preprint arXiv:2409.08963, 2024. +[858] Y. Gan, Y. Yang, Z. Ma, P. He, R. Zeng, Y. Wang, Q. Li, C. Zhou, S. Li, T. Wang et al., "Navigating the risks: A survey of security, privacy, and ethics threats in lmbased agents," arXiv preprint arXiv:2411.09523, 2024. +[859] Z. Deng, Y. Guo, C. Han, W. Ma, J. Xiong, S. Wen, and Y. Xiang, "Ai agents under threat: A survey of key security challenges and future pathways," ACM Computing Surveys, 2024. +[860] R. Ye, S. Tang, R. Ge, Y. Du, Z. Yin, S. Chen, and J. Shao, "Mas-gpt: Training llms to build llm-based multi-agent systems," arXiv preprint arXiv:2503.03686, 2025. +[861] J. 
Zhang, J. Xiang, Z. Yu, F. Teng, X. Chen, J. Chen, M. Zhuge, X. Cheng, S. Hong, J. Wang et al., "Aflow: Automating agentic workflow generation," arXiv preprint arXiv:2410.10762, 2024. +[862] L. Panait and S. Luke, "Cooperative multi-agent learning: The state of the art," Autonomous agents and multiagent systems, vol. 11, pp. 387-434, 2005. +[863] L. Hammond, A. Chan, J. Clifton, J. Hoelscher-Obermaier, A. Khan, E. McLean, C. Smith, W. Barfuss, J. Foerster, T. Gavencciak et al., "Multi-agent risks from advanced ai," arXiv preprint arXiv:2502.14143, 2025. + +[864] R. Xu, X. Li, S. Chen, and W. Xu, "Nuclear deployed: Analyzing catastrophic risks in decision-making of autonomous llm agents," arXiv preprint arXiv:2502.11355, 2025. +[865] Z. Zhou, Z. Li, J. Zhang, Y. Zhang, K. Wang, Y. Liu, and Q. Guo, "Corba: Contagious recursive blocking attacks on multi-agent systems based on large language models," arXiv preprint arXiv:2502.14529, 2025. +[866] Z. Tan, C. Zhao, R. Moraffah, Y. Li, Y. Kong, T. Chen, and H. Liu, "The wolf within: Covert injection of malice into mllm societies via an mllm operative," arXiv preprint arXiv:2402.14859, 2024. +[867] M. Yu, S. Wang, G. Zhang, J. Mao, C. Yin, Q. Liu, Q. Wen, K. Wang, and Y. Wang, "Netsafe: Exploring the topological safety of multi-agent networks," arXiv preprint arXiv:2410.15686, 2024. +[868] J.-t. Huang, J. Zhou, T. Jin, X. Zhou, Z. Chen, W. Wang, Y. Yuan, M. Sap, and M. R. Lyu, "On the resilience of multi-agent systems with malicious agents," arXiv preprint arXiv:2408.00989, 2024. +[869] P. He, Y. Lin, S. Dong, H. Xu, Y. Xing, and H. Liu, "Red-teaming llm multi-agent systems via communication attacks," arXiv preprint arXiv:2502.14847, 2025. +[870] Y. Tian, X. Yang, J. Zhang, Y. Dong, and H. Su, "Evil geniuses: Delving into the safety of llm-based agents," arXiv preprint arXiv:2311.11855, 2023. +[871] A. Amayuelas, X. Yang, A. Antoniades, W. Hua, L. Pan, and W. 
Wang, "Multiagent collaboration attack: Investigating adversarial attacks in large language model collaborations via debate," arXiv preprint arXiv:2406.14711, 2024. +[872] T. Ju, Y. Wang, X. Ma, P. Cheng, H. Zhao, Y. Wang, L. Liu, J. Xie, Z. Zhang, and G. Liu, "Flooding spread of manipulated knowledge in llm-based multi-agent communities," arXiv preprint arXiv:2407.07791, 2024. +[873] G. Lin and Q. Zhao, "Large language model sentinel: Llm agent for adversarial purification," arXiv preprint arXiv:2405.20770, 2024. +[874] Y. Zeng, Y. Wu, X. Zhang, H. Wang, and Q. Wu, "Autodefense: Multi-agent llm defense against jailbreak attacks," arXiv preprint arXiv:2403.04783, 2024. +[875] S. Chern, Z. Fan, and A. Liu, "Combating adversarial attacks with multi-agent debate," arXiv preprint arXiv:2401.05998, 2024. +[876] B. Chen, G. Li, X. Lin, Z. Wang, and J. Li, "Blockagents: Towards byzantine-robust llm-based multi-agent coordination via blockchain," in Proceedings of the ACM Turing Award Celebration Conference-China 2024, 2024, pp. 187-192. +[877] C. Song, L. Ma, J. Zheng, J. Liao, H. Kuang, and L. Yang, "Audit-llm: Multi-agent collaboration for log-based insider threat detection," arXiv preprint arXiv:2408.08902, 2024. +[878] S. Wang, G. Zhang, M. Yu, G. Wan, F. Meng, C. Guo, K. Wang, and Y. Wang, "G-safeguard: A topology-guided security lens and treatment on llm-based multi-agent systems," arXiv preprint arXiv:2502.11127, 2025. +[879] Z. Wu, S. Pan, F. Chen, G. Long, C. Zhang, and S. Y. Philip, "A comprehensive survey on graph neural networks," IEEE transactions on neural networks and + +learning systems, vol. 32, no. 1, pp. 4-24, 2020. +[880] X. Zheng, Y. Wang, Y. Liu, M. Li, M. Zhang, D. Jin, P. S. Yu, and S. Pan, "Graph neural networks for graphs with heterophily: A survey," arXiv preprint arXiv:2202.07082, 2022. +[881] M. R. Genesereth and S. P. 
Ketchpel, "The kqml protocol: A specification of language and communication," in Proceedings of the Third International Conference on Information and Knowledge Management (CIKM). ACM, 1993, pp. 1-10. +[882] D. S. Milojicic, M. Breugst, I. Busse, J. Campbell, S. Covaci, B. Friedman, K. Kosaka, D. B. Lange, K. Ono, M. Oshima, C. Tham, S. Virdhagriswaran, and J. White, "Masif: The omg mobile agent system interoperability facility," in Proceedings of the Second International Workshop on Mobile Agents, ser. MA '98. Berlin, Heidelberg: Springer-Verlag, 1998, p. 50-67. +[883] F. for Intelligent Physical Agents, "Fipa communicative act library specification," https://www.fipa.org/specs/fipa00037/SC00037J.html, 2000. +[884] F. Curbera, M. Duftler, R. Khalaf, W. Nagy, N. Mukhi, and S. Weerawarana, "Web services: Why and how," IBM Systems Journal, vol. 41, no. 2, pp. 170-177, 2002. +[885] G. Hohpe and B. Woolf, Enterprise Integration Patterns: Designing, Building, and Deploying Messaging Solutions, ser. Addison-Wesley Signature Series (Fowler). Addison-Wesley Professional, 2006. +[886] P. Lewis, E. Perez, A. Piktus, F. Petroni, V. Karpukhin, N. Goyal, H. Kuttler, M. Lewis, W.-t. Yih, T. Rocktäschel et al., "Retrieval-augmented generation for knowledge-intensive nlp tasks," Advances in neural information processing systems, vol. 33, pp. 9459-9474, 2020. +[887] G. Izacard and E. Grave, "Towards an efficient pipeline for knowledge-intensive nlp tasks," arXiv preprint arXiv:2112.04426, 2021. +[888] H. Chase, "Langchain: Build applications with llms through composability," https://github.com/ langchain-ai/langchain, 2022, accessed: Apr. 2025. +[889] J. Wu et al., "Llamaindex: Connecting llms to your knowledge," https://github.com/jerryjliu/llama_index, 2023, accessed: Apr. 2025. +[890] OpenAI, "Function calling in openerai models," https://platform.openai.com/docs/guides/functions, 2023, accessed: Apr. 2025. +[891] Anthropic, "Model context protocol," 2024, accessed: 2025-04-19. 
[Online]. Available: https://www.anthropic.com/news/model-context-protocol +[892] Google, "A2a: Agent2agent protocol," 2025, accessed: 2025-04-21. [Online]. Available: https://github.com/google/A2A +[893] G. Chang, "Anp: Agent network protocol," 2024, accessed: 2025-04-21. [Online]. Available: https://www(agent-network-protocol.com/ +[894] WildCardAI, "agents.json specification," https://github.com/wild-card-ai/agents.json, 2025, accessed: 2025-04-22. +[895] NEAR, "Aitp: Agent interaction & transaction protocol," 2025, accessed: 2025-04-22. [Online]. Available: https://aitp.dev/ +[896] L. F. Al and L. Data, "Acp: Agent communication pro + +tocol," 2025, accessed: 2025-04-22. [Online]. Available: https://github.com/orgs/i-am-bee/discussions/284 +[897] G. Cisco, Langchain, "Acp: Agent connect protocol," 2025, accessed: 2025-04-22. [Online]. Available: https://spec.acp.agntcy.org/ +[898] S. Marro, E. L. Malfa, J. Wright, G. Li, N. Shadbolt, M. Wooldridge, and P. Torr, "A scalable communication protocol for networks of large language models," 2024. [Online]. Available: https://arxiv.org/abs/2410.11905 +[899] Eclipse, "Language model operating system (lmos)," https://eclipse.dev/lmos/, 2025, accessed: 2025-04-22. +[900] AlEngineerFoundation, "Agent protocol," https://agentprotocol.ai/, 2025, accessed: 2025-04-22. +[901] R. Ranjan, S. Gupta, and S. N. Singh, "Loka protocol: A decentralized framework for trustworthy and ethical ai agent ecosystems," 2025. [Online]. Available: https://arxiv.org/abs/2504.10915 +[902] A. Srinivasan, K. Bania, S. V, H. Mestha, and S. Liu, "Implementation and application of an intelligibility protocol for interaction with an llm," 2024. [Online]. Available: https://arxiv.org/abs/2410.20600 +[903] I. Bae, J. Lee, and H.-G. Jeon, "Continuous locomotive crowd behavior generation," 2025. [Online]. Available: https://arxiv.org/abs/2504.04756 +[904] L. Gąsieniec, Łukasz Kuszner, E. Latif, R. Parasuraman, P. Spirakis, and G. 
Stachowiak, "Anonymous distributed localisation via spatial population protocols," 2024. [Online]. Available: https://arxiv.org/abs/2411.08434 +[905] J. Tu, T. Wang, J. Wang, S. Manivasagam, M. Ren, and R. Urtasun, "Adversarial attacks on multi-agent communication," in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2021, pp. 7768-7777. +[906] L. Yuan, F. Chen, Z. Zhang, and Y. Yu, "Communication-robust multi-agent learning by adaptable auxiliary multi-agent adversary generation," Frontiers of Computer Science, vol. 18, no. 6, p. 186331, 2024. +[907] J. Blumenkamp and A. Prorok, "The emergence of adversarial communication in multi-agent reinforcement learning," in Conference on Robot Learning. PMLR, 2021, pp. 1394-1414. +[908] Z. Chen, Z. Xiang, C. Xiao, D. Song, and B. Li, "Agent-poison: Red-teaming llm agents via poisoning memory or knowledge bases," in The Thirty-eighth Annual Conference on Neural Information Processing Systems. +[909] X. Pan, J. Dai, Y. Fan, and M. Yang, "Frontier ai systems have surpassed the self-replicating red line," arXiv preprint arXiv:2412.12140, 2024. +[910] L. Yu, Y. Qiu, Q. Yao, Y. Shen, X. Zhang, and J. Wang, "Robust communicative multi-agent reinforcement learning with active defense," in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 16, 2024, pp. 17575-17582. +[911] J. Light, M. Cai, S. Shen, and Z. Hu, "Avalonbench: Evaluating llms playing the game of avalon," arXiv preprint arXiv:2310.05036, 2023. +[912] Q. Xie, Q. Feng, T. Zhang, Q. Li, L. Yang, Y. Zhang, + +R. Feng, L. He, S. Gao, and Y. Zhang, "Human simulacra: Benchmarking the personification of large language models," arXiv preprint arXiv:2402.18180, 2024. +[913] L. Geng and E. Y. Chang, "Realm-bench: A real-world planning benchmark for llms and multi-agent systems," arXiv preprint arXiv:2502.18836, 2025. +[914] Y. Dubois, B. Galambosi, P. Liang, and T. B. 
Hashimoto, "Length-controlled alpacaeval: A simple way to debias automatic evaluators," arXiv preprint arXiv:2404.04475, 2024. +[915] W. Wang, J. Shi, C. Wang, C. Lee, Y. Yuan, J.-T. Huang, and M. R. Lyu, "Learning to ask: When llms meet unclear instruction," ArXiv, vol. abs/2409.00557, 2024. [Online]. Available: https://api-semanticscholar.org/CorpusID:272368496 +[916] C. Guo, X. Liu, C. Xie, A. Zhou, Y. Zeng, Z. Lin, D. Song, and B. Li, "Redcode: Risky code execution and generation benchmark for code agents," Advances in Neural Information Processing Systems, vol. 37, pp. 106-190-106-236, 2024. +[917] X. Yuan, J. Li, D. Wang, Y. Chen, X. Mao, L. Huang, H. Xue, W. Wang, K. Ren, and J. Wang, "S-eval: Automatic and adaptive test generation for benchmarking safety evaluation of large language models," arXiv preprint arXiv:2405.14191, 2024. +[918] D. Dorn, A. Variengien, C.-R. Segerie, and V. Corruble, "Bells: A framework towards future proof benchmarks for the evaluation of llm safeguards," arXiv preprint arXiv:2406.01364, 2024. +[919] Y. Shao, T. Li, W. Shi, Y. Liu, and D. Yang, "Privacylens: Evaluating privacy norm awareness of language models in action," arXiv preprint arXiv:2409.00138, 2024. +[920] Q. Zhan, Z. Liang, Z. Ying, and D. Kang, "Injecagent: Benchmarking indirect prompt injections in tool-integrated large language model agents," arXiv preprint arXiv:2403.02691, 2024. +[921] Z. Zhu, B. Wu, Z. Zhang, and B. Wu, "Riskawarebench: Towards evaluating physical risk awareness for high-level planning of llm-based embodied agents," arXiv e-prints, pp. arXiv-2408, 2024. +[922] Z. Zhang, S. Cui, Y. Lu, J. Zhou, J. Yang, H. Wang, and M. Huang, "Agent-safetybench: Evaluating the safety of llm agents," arXiv preprint arXiv:2412.14470, 2024. +[923] M. Andriushchenko, A. Souly, M. Dziemian, D. Duenas, M. Lin, J. Wang, D. Hendrycks, A. Zou, Z. Kolter, M. 
Fredrikson et al., "Agentharm: A benchmark for measuring harmfulness of llm agents," arXiv preprint arXiv:2410.09024, 2024. +[924] J. Ye, S. Li, G. Li, C. Huang, S. Gao, Y. Wu, Q. Zhang, T. Gui, and X. Huang, "Toolsword: Unveiling safety issues of large language models in tool learning across three stages," arXiv preprint arXiv:2402.10753, 2024. +[925] Y. Ruan, H. Dong, A. Wang, S. Pitis, Y. Zhou, J. Ba, Y. Dubois, C. J. Maddison, and T. Hashimoto, "Identifying the risks of lm agents with an lm-emulated sandbox," arXiv preprint arXiv:2309.15817, 2023. +[926] X. Zhou, H. Kim, F. Brahman, L. Jiang, H. Zhu, X. Lu, F. Xu, B. Y. Lin, Y. Choi, N. Mireshghallah et al., "Haicosystem: An ecosystem for sandboxing + +safety risks in human-ai interactions," arXiv preprint arXiv:2409.16427, 2024. +[927] S. Yin, X. Pang, Y. Ding, M. Chen, Y. Bi, Y. Xiong, W. Huang, Z. Xiang, J. Shao, and S. Chen, "Safeagent-bench: A benchmark for safe task planning of embodied llm agents," arXiv preprint arXiv:2412.13178, 2024. +[928] J. BENCHMARK, "Jailjudge: A comprehensive jailbreak judge benchmark with multi-agent enhanced explanation evaluation framework." +[929] P. Y. Zhong, S. Chen, R. Wang, M. McCall, B. L. Titzer, and H. Miller, "Rtbas: Defending llm agents against prompt injection and privacy leakage," arXiv preprint arXiv:2502.08966, 2025. +[930] A. Liu, Y. Zhou, X. Liu, T. Zhang, S. Liang, J. Wang, Y. Pu, T. Li, J. Zhang, W. Zhou et al., "Compromising lvm driven embodied agents with contextual backdoor attacks," IEEE Transactions on Information Forensics and Security, 2025. +[931] —, "Compromising embodied agents with contextual backdoor attacks," arXiv preprint arXiv:2408.02882, 2024. +[932] H. Zhang, C. Zhu, X. Wang, Z. Zhou, S. Hu, and L. Y. Zhang, "Badrobot: Jailbreaking llm-based embodied ai in the physical world," arXiv preprint arXiv:2407.20242, 2024. +[933] W. Shen, C. Li, H. Chen, M. Yan, X. Quan, H. Chen, J. Zhang, and F. 
Huang, "Small llms are weak tool learners: A multi-llm agent," arXiv preprint arXiv:2401.07324, 2024. +[934] S. Yuan, K. Song, J. Chen, X. Tan, Y. Shen, R. Kan, D. Li, and D. Yang, "Easytool: Enhancing llm-based agents with concise tool instruction," arXiv preprint arXiv:2401.06201, 2024. +[935] S. Wu, S. Zhao, Q. Huang, K. Huang, M. Yasunaga, K. Cao, V. Ioannidis, K. Subbian, J. Leskovec, and J. Y. Zou, "Avatar: Optimizing llm agents for tool usage via contrastive reasoning," Advances in Neural Information Processing Systems, vol. 37, pp. 25981-26010, 2024. +[936] Z. Shen, "Llm with tools: A survey," arXiv preprint arXiv:2409.18807, 2024. +[937] C. Qian, W. Liu, H. Liu, N. Chen, Y. Dang, J. Li, C. Yang, W. Chen, Y. Su, X. Cong et al., "Chatdev: Communicative agents for software development," arXiv preprint arXiv:2307.07924, 2023. +[938] Z. M. Wang, Z. Peng, H. Que, J. Liu, W. Zhou, Y. Wu, H. Guo, R. Gan, Z. Ni, J. Yang et al., "Rolellm: Benchmarking, eliciting, and enhancing role-playing abilities of large language models," arXiv preprint arXiv:2310.00746, 2023. +[939] J. Zhou, Z. Chen, D. Wan, B. Wen, Y. Song, J. Yu, Y. Huang, L. Peng, J. Yang, X. Xiao et al., "Characterglm: Customizing chinese conversational ai characters with large language models," arXiv preprint arXiv:2311.16832, 2023. +[940] Z. Chen, K. Liu, Q. Wang, W. Zhang, J. Liu, D. Lin, K. Chen, and F. Zhao, "Agent-flan: Designing data and methods of effective agent tuning for large language models," arXiv preprint arXiv:2403.12881, 2024. +[941] G. Zhang, L. Niu, J. Fang, K. Wang, L. Bai, and X. Wang, "Multi-agent architecture search via agentic supernet," arXiv preprint arXiv:2502.04180, 2025. + +[942] L. P. Kaelbling, M. L. Littman, and A. W. Moore, "Reinforcement learning: A survey," Journal of artificial intelligence research, vol. 4, pp. 237-285, 1996. +[943] Y. Li, "Deep reinforcement learning: An overview," arXiv preprint arXiv:1701.07274, 2017. +[944] X. Li, Y. Fan, and S. 
Cheng, "Aigc in china: Current developments and future outlook," arXiv preprint arXiv:2308.08451, 2023. +[945] X. Sun, L. Dong, X. Li, Z. Wan, S. Wang, T. Zhang, J. Li, F. Cheng, L. Lyu, F. Wu et al., "Pushing the limits of chatgpt on nlp tasks," arXiv preprint arXiv:2306.09719, 2023. +[946] G. Sriramanan, S. Bharti, V. S. Sadasivan, S. Saha, P. Kattakinda, and S. Feizi, "Llm-check: Investigating detection of hallucinations in large language models," Advances in Neural Information Processing Systems, vol. 37, pp. 34188-34216, 2024. +[947] K. Zheng, J. Chen, Y. Yan, X. Zou, and X. Hu, "Reefknot: A comprehensive benchmark for relation hallucination evaluation, analysis and mitigation in multimodal large language models," arXiv preprint arXiv:2408.09429, 2024. +[948] X. Zou, Y. Wang, Y. Yan, S. Huang, K. Zheng, J. Chen, C. Tang, and X. Hu, "Look twice before you answer: Memory-space visual retracing for hallucination mitigation in multimodal large language models," arXiv preprint arXiv:2410.03577, 2024. +[949] G. Zhou, Y. Yan, X. Zou, K. Wang, A. Liu, and X. Hu, "Mitigating modality prior-induced hallucinations in multimodal large language models via deciphering attention causality," arXiv preprint arXiv:2410.04780, 2024. +[950] W. Wang, Z. Ma, Z. Wang, C. Wu, W. Chen, X. Li, and Y. Yuan, "A survey of llm-based agents in medicine: How far are we from baymax?" ArXiv, vol. abs/2502.11211, 2025. [Online]. Available: https://api.sementicscholar.org/CorpusID:276408182 +[951] H. Kang and X.-Y. Liu, "Deficiency of large language models in finance: An empirical examination of hallucination," arXiv preprint arXiv:2311.15548, 2023. +[952] L. Ouyang, J. Wu, X. Jiang, D. Almeida, C. L. Wainwright, P. Mishkin, C. Zhang, S. Agarwal, K. Slama, A. Ray, J. Schulman, J. Hilton, F. Kelton, L. Miller, M. Simens, A. Askell, P. Welinder, P. F. Christiano, J. Leike, and R. Lowe, "Training language models to follow instructions with human feedback," in NeurIPS, 2022. +[953] Y. Liu, Y. 
Yao, J.-F. Ton, X. Zhang, R. Guo, H. Cheng, Y. Klochkov, M. F. Taufiq, and H. Li, "Trustworthy llms: a survey and guideline for evaluating large language models' alignment," 2024. +[954] M. Hao, H. Li, H. Chen, P. Xing, G. Xu, and T. Zhang, "Iron: Private inference on transformers," Advances in neural information processing systems, vol. 35, pp. 15718-15731, 2022. +[955] J. Huang, J.-T. Huang, Z. Liu, X. Liu, W. Wang, and J. Zhao, "Vlms as geoguessr masters: Exceptional performance, hidden biases, and privacy risks," ArXiv, vol. abs/2502.11163, 2025. [Online]. Available: https://api.sementicscholar.org/CorpusID:276409319 +[956] G. Feretzakis and V. S. Verykios, "Trustworthy ai: + +Securing sensitive data in large language models," AI, vol. 5, no. 4, pp. 2773-2800, 2024. +[957] Q. Feng, S. R. Kasa, H. Yun, C. H. Teo, and S. B. Bodapati, "Exposing privacy gaps: Membership inference attack on preference data for llm alignment," arXiv preprint arXiv:2407.06443, 2024. +[958] N. Rahman and E. Santacana, “Beyond fair use: Legal risk evaluation for training llms on copyrighted text,” in ICML Workshop on Generative AI and Law, 2023. +[959] J. Guo, Y. Li, R. Chen, Y. Wu, C. Liu, Y. Chen, and H. Huang, "Towards copyright protection for knowledge bases of retrieval-augmented language models via ownership verification with reasoning," arXiv preprint arXiv:2502.10440, 2025. +[960] S. Shao, Y. Li, H. Yao, Y. He, Z. Qin, and K. Ren, "Explanation as a watermark: Towards harmless and multi-bit model ownership verification via watermarking feature attribution," in NDSS, 2025. +[961] W. Xu, K. Gao, H. He, and M. Zhou, "Licoeval: Evaluating llms on license compliance in code generation," arXiv preprint arXiv:2408.02487, 2024. +[962] W. Qu, W. Zheng, T. Tao, D. Yin, Y. Jiang, Z. Tian, W. Zou, J. Jia, and J. Zhang, "Provably robust multi-bit watermarking for ai-generated text," arXiv preprint arXiv:2401.16820, 2024. +[963] J. Kirchenbauer, J. Geiping, Y. Wen, J. Katz, I. Miers, and T. 
Goldstein, "A watermark for large language models," in International Conference on Machine Learning. PMLR, 2023, pp. 17061-17084. +[964] J. Ye, Y. Wang, Y. Huang, D. Chen, Q. Zhang, N. Moniz, T. Gao, W. Geyer, C. Huang, P.-Y. Chen et al., "Justice or prejudice? quantifying biases in llm-as-a-judge," arXiv preprint arXiv:2410.02736, 2024. +[965] Y. Wan, W. Wang, P. He, J. Gu, H. Bai, and M. R. Lyu, "Biasaker: Measuring the bias in conversational ai system," Proceedings of the 31st ACM Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering, 2023. [Online]. Available: https://api-semanticscholar.org/CorpusID:258833296 +[966] European Union, "Artificial intelligence act," 2024, accessed: 2025-03-07. [Online]. Available: https://artificialintelligenceact.eu/ +[967] Cyberspace Administration of China, "Interim measures for the management of generative artificial intelligence services," 2023, accessed: 2025-03-07. [Online]. Available: https://www.cac.gov.cn/2023-07/13/c_1690898327029107.htm +[968] The White House, "Safe, secure, and trustworthy development and use of artificial intelligence," 2023, accessed: 2025-03-07. 
\ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15585/images/1b920951359c81b1de588e6f741d867a8d8080726518d9c47818f949e3c7423e.jpg b/data/2025/2504_15xxx/2504.15585/images/1b920951359c81b1de588e6f741d867a8d8080726518d9c47818f949e3c7423e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ee3147dd07f7655c4f2330ea11426f2d379a3973 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/1b920951359c81b1de588e6f741d867a8d8080726518d9c47818f949e3c7423e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c17490f078ae81b4c8d55a05f8719f3f2c038b991ed95cf16fe0a1668aff86a5 +size 433732 diff --git a/data/2025/2504_15xxx/2504.15585/images/24a54e97c11e8e51e263b7b98b9b21713213013b735e4963916bfb2d477a4b18.jpg b/data/2025/2504_15xxx/2504.15585/images/24a54e97c11e8e51e263b7b98b9b21713213013b735e4963916bfb2d477a4b18.jpg new file mode 100644 index 0000000000000000000000000000000000000000..62470d8688e81b59e75e41942e2dbc5766bcca08 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/24a54e97c11e8e51e263b7b98b9b21713213013b735e4963916bfb2d477a4b18.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77206bce6bc4dd8613157cce5d286c153fb19cf9e8b94fa901f007ad335ca063 +size 297691 diff --git a/data/2025/2504_15xxx/2504.15585/images/27b4f09fd68aecd75ec6c15b4737e73a3198fcfd3d6788e2cf6e5147233143f5.jpg b/data/2025/2504_15xxx/2504.15585/images/27b4f09fd68aecd75ec6c15b4737e73a3198fcfd3d6788e2cf6e5147233143f5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1870dde17dc6a496e8cecd454c9cf1d4be153b47 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/27b4f09fd68aecd75ec6c15b4737e73a3198fcfd3d6788e2cf6e5147233143f5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e0f4dc08ac63b951d27e475f94b4fd7aba7598f1f5a1aa4598b107fb4d978a12 +size 1369 diff --git 
a/data/2025/2504_15xxx/2504.15585/images/2fa9a8de989beb0f98e2c807f841f4935386a53ffedb159b40fad075d49e0a82.jpg b/data/2025/2504_15xxx/2504.15585/images/2fa9a8de989beb0f98e2c807f841f4935386a53ffedb159b40fad075d49e0a82.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8ef44debfc181cfdf4166afc5218d315261e1e5b --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/2fa9a8de989beb0f98e2c807f841f4935386a53ffedb159b40fad075d49e0a82.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d4a5bc8aa356c2a3a0cab970f60b49c489efe87f9e4c085c41a1f3ed0939b92 +size 62019 diff --git a/data/2025/2504_15xxx/2504.15585/images/3597ec62c579e34e23882fb0c0da7399687e89d674caf889fa87f31ca3686194.jpg b/data/2025/2504_15xxx/2504.15585/images/3597ec62c579e34e23882fb0c0da7399687e89d674caf889fa87f31ca3686194.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a14bf24e60756b9c427a57c4497280fb022c8e21 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/3597ec62c579e34e23882fb0c0da7399687e89d674caf889fa87f31ca3686194.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0db742ad0ca7aca63fa63aadf6f8205ed7c22514e8bcbefd8a0b665ced775116 +size 146148 diff --git a/data/2025/2504_15xxx/2504.15585/images/39576eab3b13c6f58322b854afc4d334e49b19b1700b0e374e0731a1c01b150e.jpg b/data/2025/2504_15xxx/2504.15585/images/39576eab3b13c6f58322b854afc4d334e49b19b1700b0e374e0731a1c01b150e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..753a926243db24763100bedde0a45512ad9dd0b7 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/39576eab3b13c6f58322b854afc4d334e49b19b1700b0e374e0731a1c01b150e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a343291401a184de3d380620f7d964a467ee1f9369953062714be3c8cb194b9a +size 36690 diff --git a/data/2025/2504_15xxx/2504.15585/images/3a143801a40ca350831d89f8b6734dec72d5b207c62eb1650bfeb14b6904c9f6.jpg 
b/data/2025/2504_15xxx/2504.15585/images/3a143801a40ca350831d89f8b6734dec72d5b207c62eb1650bfeb14b6904c9f6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..544d22004dacbe24a4b2180ddba3776b54c9b39f --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/3a143801a40ca350831d89f8b6734dec72d5b207c62eb1650bfeb14b6904c9f6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab23227a49819f1b9def3eac4ba59c54f859bb794642657f7d0abbc8ef7d924b +size 39527 diff --git a/data/2025/2504_15xxx/2504.15585/images/3a418fa605c423149f828c2f001b5edc46cfe6b96a344e91855296aab87fa433.jpg b/data/2025/2504_15xxx/2504.15585/images/3a418fa605c423149f828c2f001b5edc46cfe6b96a344e91855296aab87fa433.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e9c26e1a2014c7f10ffda81082167b79dd19735 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/3a418fa605c423149f828c2f001b5edc46cfe6b96a344e91855296aab87fa433.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfbfb2119ae1993b11c0dc90b3e73216876326c40327c9cb82fc04dd8bcb8960 +size 27970 diff --git a/data/2025/2504_15xxx/2504.15585/images/3b53e9483d3d0da9577270c0536f43d5dba6fe92ad9bba6ab4c0fd3c60e6cc4b.jpg b/data/2025/2504_15xxx/2504.15585/images/3b53e9483d3d0da9577270c0536f43d5dba6fe92ad9bba6ab4c0fd3c60e6cc4b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4ad6bac77982b0fa969994a460e0c05f157d22d8 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/3b53e9483d3d0da9577270c0536f43d5dba6fe92ad9bba6ab4c0fd3c60e6cc4b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ccea91290465b0b668e06e09a1d657e531839090303e27dd53a32786c98d4471 +size 27368 diff --git a/data/2025/2504_15xxx/2504.15585/images/3c01c7964a0ec82961f085a41736bb2f4f02d485345f84dac051957673a31670.jpg b/data/2025/2504_15xxx/2504.15585/images/3c01c7964a0ec82961f085a41736bb2f4f02d485345f84dac051957673a31670.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..5a26aa66b7ebe394a5910084fec9f762e3f09876 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/3c01c7964a0ec82961f085a41736bb2f4f02d485345f84dac051957673a31670.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fc4b7cb7e9ce8818949696fde4d169287c58980835464b397bb1073ec622dbe +size 58748 diff --git a/data/2025/2504_15xxx/2504.15585/images/494c56ce4c14d6ec67999ba68c4f9c1261aae972f1017e76671270ae0d772dfb.jpg b/data/2025/2504_15xxx/2504.15585/images/494c56ce4c14d6ec67999ba68c4f9c1261aae972f1017e76671270ae0d772dfb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..82c6af9904c818d6341665893f8efe1dd9b18980 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/494c56ce4c14d6ec67999ba68c4f9c1261aae972f1017e76671270ae0d772dfb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef03e22443c251eb4f6d403ddfdfd84b872f5577d66bbd523957889b6a6a62fa +size 30386 diff --git a/data/2025/2504_15xxx/2504.15585/images/4d34f195d34be5e5c6a43216b0ec96abe005b1a6a8f26aa7c36ce1f4814affca.jpg b/data/2025/2504_15xxx/2504.15585/images/4d34f195d34be5e5c6a43216b0ec96abe005b1a6a8f26aa7c36ce1f4814affca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d8aec87235c4a38d98c37f016abca99e5def1370 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/4d34f195d34be5e5c6a43216b0ec96abe005b1a6a8f26aa7c36ce1f4814affca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec1eab1ef62e8f928a17d7fd774dfd5817c68b4a10b4b85a2c90ad6aff103894 +size 22284 diff --git a/data/2025/2504_15xxx/2504.15585/images/507c336b5a2d24ce18489c83891919090519cf1f20f6a7cceb030ba324f22d7d.jpg b/data/2025/2504_15xxx/2504.15585/images/507c336b5a2d24ce18489c83891919090519cf1f20f6a7cceb030ba324f22d7d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..714033ea4be87c7e3922c31cf50aa69634ab352d --- /dev/null +++ 
b/data/2025/2504_15xxx/2504.15585/images/507c336b5a2d24ce18489c83891919090519cf1f20f6a7cceb030ba324f22d7d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2019de42dcb40f00a6503a6cbd5ff7c69b67360b9d7735103720e35b4c00aaa6 +size 35197 diff --git a/data/2025/2504_15xxx/2504.15585/images/58dfd579ca3459049d3c5f80bf67f5511b180af8ccd32375a55790a24fa9dbda.jpg b/data/2025/2504_15xxx/2504.15585/images/58dfd579ca3459049d3c5f80bf67f5511b180af8ccd32375a55790a24fa9dbda.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e6b32913032c7de170957b9098ca0d35cae71f4b --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/58dfd579ca3459049d3c5f80bf67f5511b180af8ccd32375a55790a24fa9dbda.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d5594fb898ef6d8a469f6c8df3e51ba2ce75dca96639680a0916a68b6ce19e0 +size 35668 diff --git a/data/2025/2504_15xxx/2504.15585/images/5e09ecec36584d1cf538edd7e63fbbb1fa61ac0fa57f6f3644e5882b53355973.jpg b/data/2025/2504_15xxx/2504.15585/images/5e09ecec36584d1cf538edd7e63fbbb1fa61ac0fa57f6f3644e5882b53355973.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b34cc888da4bfb5d182c450f28ce2e5de65d3a1d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/5e09ecec36584d1cf538edd7e63fbbb1fa61ac0fa57f6f3644e5882b53355973.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e95cda3bbbd737d4b246e5627de397e5fe2eafac7adce9648d2c2f268721e95 +size 43227 diff --git a/data/2025/2504_15xxx/2504.15585/images/685148b9c0f4a2a321b9e38c2fc1efd2445dfe68b9b9d5fe4b2d371fa637fe5b.jpg b/data/2025/2504_15xxx/2504.15585/images/685148b9c0f4a2a321b9e38c2fc1efd2445dfe68b9b9d5fe4b2d371fa637fe5b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6477350e14a4d0bae82c0d76f7ef542246a4b96d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/685148b9c0f4a2a321b9e38c2fc1efd2445dfe68b9b9d5fe4b2d371fa637fe5b.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:b344070fc7f05964b9f245d1502c8b83bf113035a4589b76078cdd197b448b52 +size 89229 diff --git a/data/2025/2504_15xxx/2504.15585/images/6efbd7f0539bfa93a6cba65f0dec18235d380fd7d9d02766723b3f95bd152dd6.jpg b/data/2025/2504_15xxx/2504.15585/images/6efbd7f0539bfa93a6cba65f0dec18235d380fd7d9d02766723b3f95bd152dd6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c573d9be40563f96ef967e26228f77409e954612 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/6efbd7f0539bfa93a6cba65f0dec18235d380fd7d9d02766723b3f95bd152dd6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:781fd00cbe70906bd2c7b7f8b9f4d9a515c75dc2516c410b09eb6b9bca1570bb +size 56013 diff --git a/data/2025/2504_15xxx/2504.15585/images/792d0e5c90e63607687a3b7c2093f939694dd3631a5e08614fac6eb7112e1843.jpg b/data/2025/2504_15xxx/2504.15585/images/792d0e5c90e63607687a3b7c2093f939694dd3631a5e08614fac6eb7112e1843.jpg new file mode 100644 index 0000000000000000000000000000000000000000..96f6025e7b9ef69391e4051757155cee38447c79 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/792d0e5c90e63607687a3b7c2093f939694dd3631a5e08614fac6eb7112e1843.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1633533d255ce524e85499a7930cbd810843d47c9bcb81436b415eea97859973 +size 241507 diff --git a/data/2025/2504_15xxx/2504.15585/images/7d17dc024ae8d367e87d8dd062de9a7bf8de5670470bc7baaeafe76bbb049324.jpg b/data/2025/2504_15xxx/2504.15585/images/7d17dc024ae8d367e87d8dd062de9a7bf8de5670470bc7baaeafe76bbb049324.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1bceb78be7454901d134835d6f75d64bfe5a48c0 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/7d17dc024ae8d367e87d8dd062de9a7bf8de5670470bc7baaeafe76bbb049324.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fff120fc7241bbd42b41b0aefd386d2c58111b0312063a3d94fef93902d956e3 +size 77088 diff --git 
a/data/2025/2504_15xxx/2504.15585/images/7e5d6796694a8d1a706054c8a700440e2e26505bd752ab4e4efae519e5f05197.jpg b/data/2025/2504_15xxx/2504.15585/images/7e5d6796694a8d1a706054c8a700440e2e26505bd752ab4e4efae519e5f05197.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f829b0eecfef18d52618699910198ee7eece9943 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/7e5d6796694a8d1a706054c8a700440e2e26505bd752ab4e4efae519e5f05197.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:406b435b9fa753de100da2d677c8fb26fc4932c025868f10a8c2bc6a20543863 +size 51682 diff --git a/data/2025/2504_15xxx/2504.15585/images/83ed828523c8544c3e695503562e583a5b18583530a15f2f579840c6adbbd329.jpg b/data/2025/2504_15xxx/2504.15585/images/83ed828523c8544c3e695503562e583a5b18583530a15f2f579840c6adbbd329.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0403e0e3d3107051993536fb631870b9f84d02a0 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/83ed828523c8544c3e695503562e583a5b18583530a15f2f579840c6adbbd329.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42b5719482ca246af6e0bc7ca16a4fb98e1b8730a58782ae3619986bbe669431 +size 45424 diff --git a/data/2025/2504_15xxx/2504.15585/images/869c97d2b966b4966a92a247de0ac1218fd4faaff7252718c47a2b6aff524844.jpg b/data/2025/2504_15xxx/2504.15585/images/869c97d2b966b4966a92a247de0ac1218fd4faaff7252718c47a2b6aff524844.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ee58c8c7fed6cbc1c1bee977f9cb91487ec262ac --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/869c97d2b966b4966a92a247de0ac1218fd4faaff7252718c47a2b6aff524844.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f4aeda823be15534a22542cd62b2e6748aaa7c83b776d97d4774f8b584100a9 +size 1355 diff --git a/data/2025/2504_15xxx/2504.15585/images/9deb0bb23bd9a7df4575cdd26e5b8aac051c2140a99729831eb7c59ed428f59b.jpg 
b/data/2025/2504_15xxx/2504.15585/images/9deb0bb23bd9a7df4575cdd26e5b8aac051c2140a99729831eb7c59ed428f59b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7142ed33f21a3a1d90375623ee4d2903710aa7d6 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/9deb0bb23bd9a7df4575cdd26e5b8aac051c2140a99729831eb7c59ed428f59b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5512059a17b44da8aaee536e272cf3bf956a7a85860d34d1f554dd6b123c6021 +size 53291 diff --git a/data/2025/2504_15xxx/2504.15585/images/a2b149c02628f0cb46be90a88c408bf263d347f8b42ba68b6b83ded7364f1a70.jpg b/data/2025/2504_15xxx/2504.15585/images/a2b149c02628f0cb46be90a88c408bf263d347f8b42ba68b6b83ded7364f1a70.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c0af68f557346e86e7b8e893ed3ec4fadcb7effe --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/a2b149c02628f0cb46be90a88c408bf263d347f8b42ba68b6b83ded7364f1a70.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c686cf12e919faef036bf232eb86072859c4eba0e64b8ddff915f8e20c56873 +size 248856 diff --git a/data/2025/2504_15xxx/2504.15585/images/a912ed4af41748f546e1f71cac0eb117e621e272d4b9275fa07235d7e5605523.jpg b/data/2025/2504_15xxx/2504.15585/images/a912ed4af41748f546e1f71cac0eb117e621e272d4b9275fa07235d7e5605523.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0e0fb2cafa5bcac873ac72cb3d5d539267eda4c --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/a912ed4af41748f546e1f71cac0eb117e621e272d4b9275fa07235d7e5605523.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:068fba7c2b011e37e752669d4c8ae8d65cca37f1ecf4317e2ebdb2152186ad5b +size 54077 diff --git a/data/2025/2504_15xxx/2504.15585/images/b77e0d27b30fab18c8a5198df55bc3557bc9da7c2ff2c1bd5181dc84b6441a02.jpg b/data/2025/2504_15xxx/2504.15585/images/b77e0d27b30fab18c8a5198df55bc3557bc9da7c2ff2c1bd5181dc84b6441a02.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..81226f004be10d75334c6e794de0cf982ccc0811 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/b77e0d27b30fab18c8a5198df55bc3557bc9da7c2ff2c1bd5181dc84b6441a02.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a54f1478655aa77aa8e52973dcba7933fad9bd22c4d489d75565242729d4c900 +size 37554 diff --git a/data/2025/2504_15xxx/2504.15585/images/c15a0bf22feefb3d8da849662a077a55a7495872bd7c6e07d3f46668bf5282f1.jpg b/data/2025/2504_15xxx/2504.15585/images/c15a0bf22feefb3d8da849662a077a55a7495872bd7c6e07d3f46668bf5282f1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eb218da814b1b8065cfb1d454af3af13bb928c10 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/c15a0bf22feefb3d8da849662a077a55a7495872bd7c6e07d3f46668bf5282f1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9124708bdf70242f5b914d56d85ef5e0df4a78ee0264babf52a4e935a9a979c +size 155232 diff --git a/data/2025/2504_15xxx/2504.15585/images/cf762587d56c382c4c037d7bf5aac6c071b7d0e9976abb0ab003388647d5eb60.jpg b/data/2025/2504_15xxx/2504.15585/images/cf762587d56c382c4c037d7bf5aac6c071b7d0e9976abb0ab003388647d5eb60.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6f7f22f41c8a09038c98e1ca037b2f85aa0dd71a --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/cf762587d56c382c4c037d7bf5aac6c071b7d0e9976abb0ab003388647d5eb60.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47e2b859ad4892547c4c214819abec5bf884821c924b2caf3c9a8244fe0c7032 +size 199239 diff --git a/data/2025/2504_15xxx/2504.15585/images/cf7b73540606aebc13ca472854080f2b100d90a206010547361e44605ddbdfc2.jpg b/data/2025/2504_15xxx/2504.15585/images/cf7b73540606aebc13ca472854080f2b100d90a206010547361e44605ddbdfc2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d97322e373c345f729b3209348613febb00b792c --- /dev/null +++ 
b/data/2025/2504_15xxx/2504.15585/images/cf7b73540606aebc13ca472854080f2b100d90a206010547361e44605ddbdfc2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77b18d608eb4edf5960e9500b696792d7f828fc7f2bd93e00acf62a01c5b7833 +size 34835 diff --git a/data/2025/2504_15xxx/2504.15585/images/d343f41a4fec2cbd537ab3a55e973848372f25236d86cec814430e4e0878d83c.jpg b/data/2025/2504_15xxx/2504.15585/images/d343f41a4fec2cbd537ab3a55e973848372f25236d86cec814430e4e0878d83c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16800f529e8ed71e94b316f6965a2f2e4875432d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/d343f41a4fec2cbd537ab3a55e973848372f25236d86cec814430e4e0878d83c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:820b56601f942fa26ba2b41aaf3c81ba343ea22be2ac8aea4a2193deae7a34f1 +size 1420 diff --git a/data/2025/2504_15xxx/2504.15585/images/f4ce229cb80a8c96656ded2aa655c39cbe884dd49ac1e9e0e1650264348f5451.jpg b/data/2025/2504_15xxx/2504.15585/images/f4ce229cb80a8c96656ded2aa655c39cbe884dd49ac1e9e0e1650264348f5451.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c4fc779905fe50262d563724c17d491bd115779d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/images/f4ce229cb80a8c96656ded2aa655c39cbe884dd49ac1e9e0e1650264348f5451.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fcb1f9a8d74acaec90d6c6fa79b4506fe34121ccf8e51528e482e92da65eee19 +size 35699 diff --git a/data/2025/2504_15xxx/2504.15585/layout.json b/data/2025/2504_15xxx/2504.15585/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..1e825e9a6d0ae213d23b4376484bf82fac0c4647 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15585/layout.json @@ -0,0 +1,57714 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 63, + 52, + 547, + 109 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 52, + 547, + 109 + ], + "spans": [ + { + "bbox": [ 
+ 63, + 52, + 547, + 109 + ], + "type": "text", + "content": "A Comprehensive Survey in LLM(-Agent) Full Stack Safety: Data, Training and Deployment" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 125, + 564, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 125, + 564, + 329 + ], + "spans": [ + { + "bbox": [ + 45, + 125, + 564, + 329 + ], + "type": "text", + "content": "Kun Wang\\*1,2, Guibin Zhang\\*3, Zhenhong Zhou†4, Jiahao Wu†5,6, Miao Yu7, Shiqian Zhao1, Chenlong Yin8, Jinhu Fu9, Yibo Yan10,11, Hanjun Luo12, Liang Lin13, Zhihao Xu14, Haolang Lu1, Xinye Cao1, Xinyun Zhou1, Weifei Jin1, Fanci Meng7, Shicheng Xu15, Junyuan Mao3, Yu Wang16, Hao Wu17, Minghe Wang12, Fan Zhang18, Junfeng Fang3, Wenjie Qu3, Yue Liu3, Chengwei Liu1, Yifan Zhang19, Qiankun Li7, Chongye Guo20,21, Yalan Qin20,21, Zhaoxin Fan22, Kai Wang3, Yi Ding1, Donghai Hong23, Jiaming Ji23, Yingxin Lai24, Zitong Yu24, Xinfeng Li1, Yifan Jiang25, Yanhui Li12, Xinyu Deng12, Junlin Wu12, Dongxia Wang12, Yihao Huang1, Yufei Guo23, Jen-tse Huang26, Qiufeng Wang27, Xiaolong Jin45, Wenxuan Wang14, Dongrui Liu21, Yanwei Yue23, Wenke Huang29, Guancheng Wan30, Heng Chang46, Tianlin Li1, Yi Yu1, Chenghao Li31, Jiawei Li33, Lei Bai21, Jie Zhang4, Qing Guo4, Jingyi Wang12, Tianlong Chen32, Joey Tianyi Zhou4, Xiaojun Jia1, Weisong Sun1, Cong Wu34, Jing Chen29, Xuming Hu10,11, Yiming Li1, Xiao Wang35, Ningyu Zhang12, Luu Anh Tuan1, Guowen Xu31, Jiaheng Zhang3, Tianwei Zhang1, Xingjun Ma37, Jindong Gu38, Liang Pang15, Xiang Wang7, Bo An1, Jun Sun36, Mohit Bansal32, Shirui Pan28, Lingjuan Lyu40, Yuval Elovici41, Bhavya Kailkhura42, Yaodong Yang23, Hongwei Li31, Wenyuan Xu12, Yizhou Sun30, Wei Wang30, Qing Li5, Ke Tang6, Yu-Gang Jiang37, Felix Juefei-Xu43, Hui Xiong10,11, Xiaofeng Wang46, Dacheng Tao1, Philip S. 
Yu44, Qingsong Wen2, Yang Liu1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "spans": [ + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Nanyang Technological University, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Squirrel AI Learning, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "National University of Singapore, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "A*STAR, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "The Hong Kong Polytechnic University, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Southern University of Science and Technology, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{7}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "University of Science and Technology of China, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{8}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "The Pennsylvania State University, " + }, + { + "bbox": [ + 
45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{9}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "TeleAI, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{10}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Hong Kong University of Science and Technology (Guangzhou), " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{11}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Hong Kong University of Science and Technology, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{12}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Zhejiang University, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{13}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Institute of Information Engineering, Chinese Academy of Sciences, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{14}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Renmin University of China, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{15}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Institute of Computing Technology, Chinese Academy of Sciences, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{16}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "University of California, San Diego, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{17}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": 
"Tencent, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{18}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Georgia Institute of Technology, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{19}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Institute of Automation, Chinese Academy of Sciences, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{20}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Shanghai University, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{21}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Shanghai AI Laboratory, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{22}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Beihang University, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{23}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Peking University, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{24}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Great Bay University, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{25}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "University of Southern California, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{26}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Johns Hopkins University, " + }, + { + "bbox": [ + 45, + 337, + 564, 
+ 540 + ], + "type": "inline_equation", + "content": "^{27}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Southeast University, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{28}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Griffith University, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{29}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Wuhan University, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{30}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "University of California, Los Angeles, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{31}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "University of Electronic Science and Technology of China, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{32}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "The University of North Carolina at Chapel Hill, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{33}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Tsinghua University, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{34}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "The University of Hong Kong, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{35}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "University of Washington, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": 
"inline_equation", + "content": "^{36}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Singapore Management University, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{37}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Fudan University, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{38}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "University of Oxford, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{39}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "New York University, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{40}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Sony, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{41}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Ben Gurion University, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{42}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "Lawrence Livermore National Laboratory, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{43}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "New York University, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{44}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "University of Illinois at Chicago, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{45}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + 
], + "type": "text", + "content": "Purdue University, " + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "inline_equation", + "content": "^{46}" + }, + { + "bbox": [ + 45, + 337, + 564, + 540 + ], + "type": "text", + "content": "ACM Member" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 64, + 559, + 544, + 736 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 559, + 544, + 736 + ], + "spans": [ + { + "bbox": [ + 64, + 559, + 544, + 736 + ], + "type": "text", + "content": "Abstract—The remarkable success of Large Language Models (LLMs) has illuminated a promising pathway toward achieving Artificial General Intelligence for both academic and industrial communities, owing to their unprecedented performance across various applications. As LLMs continue to gain prominence in both research and commercial domains, their security and safety implications have become a growing concern, not only for researchers and corporations but also for all nations. Currently, existing surveys on LLM safety primarily focus on specific stages of the LLM lifecycle, e.g., deployment phase or fine-tuning phase, lacking a comprehensive understanding of the entire \"lifechain\" of LLMs. To address this gap, this paper introduces, for the first time, the concept of \"full-stack\" safety to systematically consider safety issues throughout the entire process of data, training (pre-training, post-training), deployment (deployment and final commercialization). Compared to the off-the-shelf LLM safety surveys, our work demonstrates several distinctive advantages: (I) Comprehensive Perspective. We define the complete LLM lifecycle as encompassing data preparation, pre-training, post-training (including alignment and fine-tuning, model editing, etc.), deployment and final commercialization. To our knowledge, this represents the first safety survey to encompass the entire lifecycle of LLMs. (II) Extensive Literature Support. 
Our research is grounded in an exhaustive review of over " + }, + { + "bbox": [ + 64, + 559, + 544, + 736 + ], + "type": "inline_equation", + "content": "900+" + }, + { + "bbox": [ + 64, + 559, + 544, + 736 + ], + "type": "text", + "content": " papers, ensuring comprehensive coverage and systematic organization of safety issues within a more holistic understanding. (III) Unique Insights. Through systematic literature analysis, we develop reliable roadmaps and perspectives for each chapter. Our work identifies promising research directions, including safety in data generation, alignment techniques, model editing, and LLM-based agent systems. These insights provide valuable guidance for researchers pursuing future work in this field. We provide an up-to-date review of the literature on LLM (agent) safety at https://github.com/bingreeky/full-stack-llm-safety, which can be considered a useful support for both researchers and engineers." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 14, + 225, + 35, + 563 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 225, + 35, + 563 + ], + "spans": [ + { + "bbox": [ + 14, + 225, + 35, + 563 + ], + "type": "text", + "content": "arXiv:2504.15585v4 [cs.CR] 9 Jun 2025" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 64, + 746, + 528, + 757 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 746, + 528, + 757 + ], + "spans": [ + { + "bbox": [ + 64, + 746, + 528, + 757 + ], + "type": "text", + "content": "Index Terms—Large Language Model, LLM-based Agent, Safety, Post-training, Alignment, Model Editing, Unlearning, Evaluation" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 41, + 141, + 53 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 41, + 141, + 53 + ], + "spans": [ + { + "bbox": [ + 45, + 41, + 141, + 53 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 78, + 301, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 78, + 301, + 205 + ], + "spans": [ + { + "bbox": [ + 44, + 78, + 301, + 205 + ], + "type": "text", + "content": "The emergence and success of large language models (LLMs) [1, 2, 3, 4, 5] have greatly transformed the modes of production in both academia and industry [6, 7, 8, 9, 10, 11, 12, 13], opening a potential path for the upcoming artificial general intelligence [14, 15, 16]. 
Going beyond this, LLMs, by integrating tools [17, 18, 19, 20], memory [21, 22, 23, 24], APIs [25, 26], and by constructing single-agent or multiagent systems with other LLMs, provide powerful tools for large models to perceive, understand, and change the environment [27, 28, 29, 30]. This has garnered considerable attention for embodied intelligence [31, 32]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 208, + 301, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 208, + 301, + 348 + ], + "spans": [ + { + "bbox": [ + 44, + 208, + 301, + 348 + ], + "type": "text", + "content": "Unfortunately, the entire lifecycle of LLMs is constantly confronted with security and safety issues [33, 34, 35, 36, 37]. During the data preparation phase, since LLMs require ample and diverse data, and a significant amount of data is sourced from the Internet and other open-source scenarios, the toxicity in the data and user privacy may seep into the model parameters, triggering crises in the model [38, 39, 40]. The pretraining process of the model, due to its unsupervised nature, unconsciously absorbs these toxic data and privacy information, thereby causing the model's \"genetic makeup\" to carry dangerous characteristics and privacy issues [41, 42, 43, 44]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 350, + 301, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 350, + 301, + 478 + ], + "spans": [ + { + "bbox": [ + 44, + 350, + 301, + 478 + ], + "type": "text", + "content": "Before the model is deployed, if it is not properly aligned with security measures, it can easily deviate from human values [45, 46]. Meanwhile, to make the model more \"specialized,\" the fine-tuning process will employ safer and more customized data to ensure the model performs flawlessly in specific domains [47, 48, 49, 50]. 
The model deployment process also involves issues such as jailbreak attacks and corresponding defense measures [51, 52, 53], especially for LLM-based agents [54]. These agents may become contaminated due to their interaction with tools, memory, and the environment [55, 56, 57, 58]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 481, + 301, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 481, + 301, + 656 + ], + "spans": [ + { + "bbox": [ + 44, + 481, + 301, + 656 + ], + "type": "text", + "content": "Previous surveys on LLMs have primarily focused on the research aspects of LLM itself, often overlooking detailed discussions on LLM safety [7, 34] and in-depth exploration of trustworthiness issues [75]. Meanwhile, off-the-shelf surveys that do address LLM safety tend to concentrate on various trustworthiness concerns or are limited to a single phase of the LLM lifecycle [33, 76, 77], such as the deployment stage and fine-tuning stage. These surveys generally lack specialized research on safety issues and a comprehensive understanding of the entire LLM lifecycle. Table 1 summarizes the differences between our survey and previous surveys. Upon reviewing the aforementioned survey and systematically investigating the related literature, we conclude that our survey endeavors to address several questions that existing surveys have not covered:" + } + ] + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 310, + 70, + 565, + 329 + ], + "blocks": [ + { + "bbox": [ + 326, + 38, + 548, + 63 + ], + "lines": [ + { + "bbox": [ + 326, + 38, + 548, + 63 + ], + "spans": [ + { + "bbox": [ + 326, + 38, + 548, + 63 + ], + "type": "text", + "content": "TABLE 1: Survey Comparison on LLMs and Agents settings." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 70, + 565, + 329 + ], + "lines": [ + { + "bbox": [ + 310, + 70, + 565, + 329 + ], + "spans": [ + { + "bbox": [ + 310, + 70, + 565, + 329 + ], + "type": "table", + "html": "
SurveyObjectStage*
\\( LLM^‡ \\)\\( Agent^§ \\)DataPTEditFTDepEval
Year 2023
Zhao et al. [6]S+M-X
Liang et al. [59]M-XX
Chang et al. [7]S+M-XXXX
Zhang et al. [60]S+M-XXX
Wang et al. [28]-SXXXX
Zhao et al. [61]S-XXX
Xi et al. [29]-S+MASXXXX
Shen et al. [62]S-XXX
Raijan et al. [63]S-XXXX
Kalyan et al. [64]S+M-XX
Huang et al. [51]S-XXX
Shayegani et al. [65]S+MMASXXXX
Yao et al. [66]S-XXXX
Year 2024
Guo et al. [27]-S+MASXXXX
Qin et al. [67]S+M-XX
Hadi et al. [68]S-XXX
Sun et al. [69]S+MSXXX
Das et al. [70]S-XXXX
He et al. [71]-S+M+MASXXXXX
Wang et al. [54]-S+MASXXXXX
Year 2025
Tie et al. [72]S+M-XXX
Ma et al. [33]S+MS+MXX
Huang et al. [73]S+MS+MXX
Yu et al. [74]SS+MASXXXX
Chen et al. [36]S-XX
OursS+MS+M+MAS
", + "image_path": "685148b9c0f4a2a321b9e38c2fc1efd2445dfe68b9b9d5fe4b2d371fa637fe5b.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 310, + 329, + 558, + 338 + ], + "lines": [ + { + "bbox": [ + 310, + 329, + 558, + 338 + ], + "spans": [ + { + "bbox": [ + 310, + 329, + 558, + 338 + ], + "type": "inline_equation", + "content": "\\ddagger" + }, + { + "bbox": [ + 310, + 329, + 558, + 338 + ], + "type": "text", + "content": " : Single-modal LLM (S), Multi-modal LLM (M)." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_footnote" + }, + { + "bbox": [ + 310, + 338, + 558, + 346 + ], + "lines": [ + { + "bbox": [ + 310, + 338, + 558, + 346 + ], + "spans": [ + { + "bbox": [ + 310, + 338, + 558, + 346 + ], + "type": "inline_equation", + "content": "\\S" + }, + { + "bbox": [ + 310, + 338, + 558, + 346 + ], + "type": "text", + "content": " : Single-modal Agent (S), Multi-modal Agent (M), Multi-agent System (MAS)." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_footnote" + }, + { + "bbox": [ + 310, + 346, + 548, + 354 + ], + "lines": [ + { + "bbox": [ + 310, + 346, + 548, + 354 + ], + "spans": [ + { + "bbox": [ + 310, + 346, + 548, + 354 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 310, + 346, + 548, + 354 + ], + "type": "text", + "content": " : Pre-training (PT), Fine-tuning (FT), Deployment (Dep), Evaluation (Eval)." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 324, + 380, + 342, + 395 + ], + "blocks": [ + { + "bbox": [ + 324, + 380, + 342, + 395 + ], + "lines": [ + { + "bbox": [ + 324, + 380, + 342, + 395 + ], + "spans": [ + { + "bbox": [ + 324, + 380, + 342, + 395 + ], + "type": "image", + "image_path": "d343f41a4fec2cbd537ab3a55e973848372f25236d86cec814430e4e0878d83c.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 342, + 387, + 550, + 411 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 342, + 387, + 550, + 411 + ], + "spans": [ + { + "bbox": [ + 342, + 387, + 550, + 411 + ], + "type": "text", + "content": "What aspects should the safety of large models be compass?" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 424, + 566, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 424, + 566, + 632 + ], + "spans": [ + { + "bbox": [ + 308, + 424, + 566, + 632 + ], + "type": "text", + "content": "Contribution 1. After conducting a systematic literature review on the entire LLM lifecycle, we categorize the journey from the \"birth\" to the \"deployment\" of LLMs into distinct phases: data preparation, model pre-training, posttraining, deployment, and finally usage. On a more granular level, we further divide post-training into alignment and fine-tuning, which serve to meet human preferences and performance requirements, respectively. Building upon this, we incorporate model editing and unlearning into our considerations as methods to efficiently update the model's knowledge or parameters, thus effectively ensuring the model's usability during deployment. 
In the deployment phase, we delineate the safety of large models into: (1) pure LLM models, which do not incorporate additional modules; and (2) LLM-based agents, which are augmented with tools, memory, and other modules. This framework encompasses the entire cycle of model parameter training, convergence, and solidification." + } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 324, + 647, + 342, + 662 + ], + "blocks": [ + { + "bbox": [ + 324, + 647, + 342, + 662 + ], + "lines": [ + { + "bbox": [ + 324, + 647, + 342, + 662 + ], + "spans": [ + { + "bbox": [ + 324, + 647, + 342, + 662 + ], + "type": "image", + "image_path": "27b4f09fd68aecd75ec6c15b4737e73a3198fcfd3d6788e2cf6e5147233143f5.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 342, + 654, + 549, + 676 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 342, + 654, + 549, + 676 + ], + "spans": [ + { + "bbox": [ + 342, + 654, + 549, + 676 + ], + "type": "text", + "content": "How to provide a clearer taxonomy and literature ew?" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 689, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 689, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 308, + 689, + 566, + 748 + ], + "type": "text", + "content": "Contribution 2. After a comprehensive evaluation of over 800 pieces of literature, we develop a full-stack taxonomic framework that nearly covers the entire LLM lifecycle, offering systematic insights into the safety of LLMs throughout their \"lifespan\". We provide a more reliable" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 44, + 681, + 301, + 746 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 681, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 681, + 301, + 746 + ], + "type": "text", + "content": "Kun Wang is with Nanyang Technological University (wang.kun@ntu.edu.sg), Guibin Zhang is with National University of Singapore (guibinz@outlook.com), Jiahao Wu is with The Hong Kong Polytechnic University (jiahao.wu@connect.polyu.hk), Zhenhong Zhou is with A\\*STAR (ydyjyazhh@gmail.com), Yang Liu is with Nanyang Technological University (yangliu@ntu.edu.sg). * denotes equal contribution and † denotes the corresponding authors." + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 90 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 90 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 90 + ], + "type": "text", + "content": "correlation analysis between each phase of the LLM timeline and other relevant sections, aiding readers in understanding the safety issues of LLMs while also clarifying the research stage of each LLM phase." 
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 60, + 105, + 77, + 121 + ], + "blocks": [ + { + "bbox": [ + 60, + 105, + 77, + 121 + ], + "lines": [ + { + "bbox": [ + 60, + 105, + 77, + 121 + ], + "spans": [ + { + "bbox": [ + 60, + 105, + 77, + 121 + ], + "type": "image", + "image_path": "869c97d2b966b4966a92a247de0ac1218fd4faaff7252718c47a2b6aff524844.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 78, + 112, + 285, + 135 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 112, + 285, + 135 + ], + "spans": [ + { + "bbox": [ + 78, + 112, + 285, + 135 + ], + "type": "text", + "content": "What are the potential growth areas for future M safety concerns?" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 144, + 301, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 144, + 301, + 248 + ], + "spans": [ + { + "bbox": [ + 44, + 144, + 301, + 248 + ], + "type": "text", + "content": "Contribution 3. Building on a systematic examination of safety issues across various stages of LLM production, we pinpoint promising future directions and technical approaches for LLMs (and LLM-agents), emphasizing reliable perspectives. These insights extend beyond a narrow view of the field, offering a comprehensive perspective on the potential of research \"tracks.\" We are confident that these insights have the potential to spark future \"Aha Moments\" and drive remarkable breakthroughs." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 249, + 301, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 249, + 301, + 630 + ], + "spans": [ + { + "bbox": [ + 44, + 249, + 301, + 630 + ], + "type": "text", + "content": "Taxonomy. Our article begins with the structural preparation of data. 
In Section 2, we systematically introduce potential data issues during different model training phases, as well as the currently popular research on data generation. In Section 3, we focus on the security and safety concerns during the pre-training phase, which includes two core modules: data filtering and augmenting. In Section 4, we concentrate on the post-training phase, differing from previous works by incorporating fine-tuning and alignment, which involve attack, defense, and evaluation. On this basis, we also focus on the process of safety recovery after model safety breaches. In Section 5, we observe that models require dynamic updates in real-world scenarios. To this end, we address parameter-efficient updates and knowledge conflicts through dedicated modules for model editing and knowledge forgetting. Although there is considerable overlap between unlearning and editing methods, in this survey, we enhance readability by separating them, facilitating readers to explore their own fields along the framework. Subsequently, in Section 6, we focus on the safety issues after the model parameters are solidified, which share many commonalities with traditional large model security surveys. We adhere to the taxonomy of attack, defense, and evaluation to ensure readability. Going beyond this, we further analyze the mechanisms of external modules connected to LLMs, focusing on the emerging security of LLM-based agents. Finally, in Section 7, we present multiple safety concerns for the commercialization and ethical guidelines, as well as user usage, of LLM-based applications. To provide readers with a comprehensive understanding of our research framework, we dedicate Section 8 to outlining promising future research directions, while Section 9 presents synthesized conclusions and broader implications." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 631, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 631, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 631, + 301, + 746 + ], + "type": "text", + "content": "At the conclusion of each chapter, we provide a roadmap and perspective of the research content covered in the sections, to facilitate readers' clearer understanding of the technological evolution path and potential future growth areas. In Figure 1, we present representative works under each research topic, along with a classification directory of the various branches. Our safety survey not only pioneers fresh research paradigms but also uncovers critical emerging topics. By mapping security considerations throughout LLMs' complete lifecycle, we establish a standardized" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "type": "text", + "content": "research architecture that will guide both academic and industrial safety initiatives." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 83, + 399, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 83, + 399, + 95 + ], + "spans": [ + { + "bbox": [ + 309, + 83, + 399, + 95 + ], + "type": "text", + "content": "2 DATA SAFETY" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 101, + 566, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 101, + 566, + 263 + ], + "spans": [ + { + "bbox": [ + 307, + 101, + 566, + 263 + ], + "type": "text", + "content": "In the first section, we begin with the data. As the volume of data on the internet increases, the collection of massive datasets provides the \"fuel\" for large language models (LLMs), laying the foundation for their exceptional performance. 
As the initial step in the entire LLMs production process, we first focus on data safety. Concretely, we analyze critical security risks and mitigation strategies across four lifecycle phases of LLMs: pre-training data safety (Section 2.1), fine-tuning data safety (Section 2.2) and alignment data safety (Section 2.3). Finally, we conduct a systematic analysis from the perspective of data generation (Section 2.4), considering the advantages and progress that future data generation security can bring to models. We summarize the literature on secure and reliable data generation." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 280, + 440, + 293 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 280, + 440, + 293 + ], + "spans": [ + { + "bbox": [ + 308, + 280, + 440, + 293 + ], + "type": "text", + "content": "2.1 Pretraining Data Safety" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 296, + 565, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 296, + 565, + 422 + ], + "spans": [ + { + "bbox": [ + 307, + 296, + 565, + 422 + ], + "type": "text", + "content": "The pretraining phase of LLMs relies heavily on massive, diverse datasets collected from the Internet [78, 79, 80] or open-source data platforms [81, 82] (e.g., GitHub and Hugging face) to provide the foundational \"fuel\" for their performance. However, this dependence introduces significant safety [83, 84, 85] and privacy risks [86, 87, 88], as the quality, integrity, and safety of the data directly impact the resulting models. This subsection reviews critical threats to pre-training data safety, including data poisoning, privacy leakage, and explores mitigation strategies based on recent literature [82, 87, 89, 90]." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 423, + 565, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 423, + 565, + 654 + ], + "spans": [ + { + "bbox": [ + 308, + 423, + 565, + 654 + ], + "type": "text", + "content": "Training Data Poisoning. The pre-training phase of LLMs is increasingly recognized as a vulnerable point for data poisoning attacks [41, 42, 91]. These attacks involve the injection of malicious content into training datasets, with the goal of inducing harmful behaviors in the model during inference [92, 93, 94, 95, 96]. Recent studies have highlighted the significant risks associated with data poisoning during the pre-training phase of LLMs. For example, [84] and [85] both highlight that small fractions of poisoned data (as low as " + }, + { + "bbox": [ + 308, + 423, + 565, + 654 + ], + "type": "inline_equation", + "content": "0.1\\%" + }, + { + "bbox": [ + 308, + 423, + 565, + 654 + ], + "type": "text", + "content": ") can have lasting impacts on model behavior, even after extensive fine-tuning. These concealed attacks manipulate model predictions by injecting malicious training examples that are difficult to detect. Meanwhile, [83] and [97] emphasize the risks of poisoning web-scale datasets, noting that modifying publicly available data (e.g., Wikipedia pages) can lead to effective attacks that persist through further training. The study by Sun et al. [81] show that code poisoning by simply modifying one variable/function name can enable the code language model for the code search task to make vulnerable code rank in the top " + }, + { + "bbox": [ + 308, + 423, + 565, + 654 + ], + "type": "inline_equation", + "content": "11\\%" + }, + { + "bbox": [ + 308, + 423, + 565, + 654 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 654, + 566, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 654, + 566, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 654, + 566, + 746 + ], + "type": "text", + "content": "Privacy leakage. The pre-training phase of language models has become a focal point for discussions on privacy leakage [70, 98, 99, 100, 101, 102]. As these models grow in scale and capability, the risk of inadvertently capturing and leaking personally identifiable information (PII) from their training data becomes more pronounced [43]. [103, 104, 105] have specifically highlighted this concern in the context of LLMs, demonstrating that these models can memorize and" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 44, + 45, + 566, + 456 + ], + "blocks": [ + { + "bbox": [ + 44, + 45, + 566, + 456 + ], + "lines": [ + { + "bbox": [ + 44, + 45, + 566, + 456 + ], + "spans": [ + { + "bbox": [ + 44, + 45, + 566, + 456 + ], + "type": "image", + "image_path": "24a54e97c11e8e51e263b7b98b9b21713213013b735e4963916bfb2d477a4b18.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 457, + 563, + 471 + ], + "lines": [ + { + "bbox": [ + 45, + 457, + 563, + 471 + ], + "spans": [ + { + "bbox": [ + 45, + 457, + 563, + 471 + ], + "type": "text", + "content": "Fig. 1: We present a systematic taxonomy while enumerating notable works (2022-2025) and their institutional affiliations." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 490, + 300, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 490, + 300, + 710 + ], + "spans": [ + { + "bbox": [ + 44, + 490, + 300, + 710 + ], + "type": "text", + "content": "reproduce sensitive information through targeted attacks. Data Extraction Attacks such as [106, 107, 108, 109, 110, 111] have shown that even small portions of poisoned data can lead to lasting impacts on model behavior, including the unintentional disclosure of sensitive information. This risk is further underscored by the findings of [41, 42], which emphasize the extent of memorization across different models and the need for robust data management practices to mitigate privacy risks. 
Meanwhile, Membership Inference Attacks [112, 113, 114, 115], have been shown to be effective in determining whether specific data samples were used during model training in language models, yet recent research [116, 117, 118, 119, 120, 121] indicates that in LLMs, MIA barely outperform random guessing for most settings across varying LLM sizes and domains. Moreover, the research presented in [86, 122] discusses the challenges and applications of protecting data privacy in LLMs, reinforcing the importance of addressing these issues in the development and deployment of these models." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 712, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 712, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 712, + 301, + 746 + ], + "type": "text", + "content": "Mitigation strategies against data insecurity in LLM pre-training include several key interventions. To address toxic content, custom classifiers trained on safety datasets" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 308, + 490, + 564, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 490, + 564, + 583 + ], + "spans": [ + { + "bbox": [ + 308, + 490, + 564, + 583 + ], + "type": "text", + "content": "are employed to detect and filter pre-training data [89, 123, 124]. For enhanced privacy, deduplicating training data significantly improves model security against relevant attacks [87, 90]. Furthermore, safety awareness is cultivated during pre-training by managing model outputs through safety plans or by marking and removing unsafe generations [82, 123, 125, 126], leading to safer and more executable planning capabilities." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 308, + 584, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 584, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 308, + 584, + 564, + 748 + ], + "type": "text", + "content": "Mitigation measures. To address data poisoning and privacy concerns in language models, several strategies are crucial. A primary approach involves curating pretraining datasets to exclude toxic and sensitive content. [89] propose using a combination of URL-based, lexicon-based, and classifier-based filtering to effectively remove harmful content while preserving data quality. Another important strategy is employing data dedduplication techniques, which can prevent model memorization of specific instances, thereby reducing privacy risks. [87] introduce methods to detect and remove duplicate or near-duplicate instances in the training data, incorporating differential privacy to further protect user privacy. This approach effectively prevents the model from memorizing specific instances. In addition, developing" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 50, + 44, + 299, + 335 + ], + "blocks": [ + { + "bbox": [ + 50, + 44, + 299, + 335 + ], + "lines": [ + { + "bbox": [ + 50, + 44, + 299, + 335 + ], + "spans": [ + { + "bbox": [ + 50, + 44, + 299, + 335 + ], + "type": "image", + "image_path": "c15a0bf22feefb3d8da849662a077a55a7495872bd7c6e07d3f46668bf5282f1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 52, + 342, + 293, + 389 + ], + "lines": [ + { + "bbox": [ + 52, + 342, + 293, + 389 + ], + "spans": [ + { + "bbox": [ + 52, + 342, + 293, + 389 + ], + "type": "text", + "content": "Fig. 2: LLMs encounter a wide range of data safety risks throughout their lifecycle, from the initial stages of data collection and pre-processing to model training, deployment, and ongoing updates." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 407, + 300, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 407, + 300, + 489 + ], + "spans": [ + { + "bbox": [ + 44, + 407, + 300, + 489 + ], + "type": "text", + "content": "robust defenses against data poisoning is vital to ensure that models are less susceptible to manipulation through malicious data injection. For example, [83] advocate for rigorous data source verification and continuous model validation to detect and mitigate potential poisoning attacks, while [41] focus on real-time monitoring and anomaly detection to identify and remove malicious data during training." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 499, + 178, + 511 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 499, + 178, + 511 + ], + "spans": [ + { + "bbox": [ + 44, + 499, + 178, + 511 + ], + "type": "text", + "content": "2.2 Fine-tuning Data Safety" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 514, + 300, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 514, + 300, + 617 + ], + "spans": [ + { + "bbox": [ + 44, + 514, + 300, + 617 + ], + "type": "text", + "content": "Data safety in the fine-tuning stage has emerged as a critical concern in the development of LLMs, with data poisoning attacks presenting particularly sophisticated threats to LLMs [127]. Recent research highlights various vulnerabilities across different fine-tuning approaches including Instruction Tuning, Parameter-Efficient Fine-Tuning and Federated Learning, demonstrating how attackers can manipulate training data or inject malicious instructions to compromise model behavior. These risks include:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 41, + 619, + 301, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 619, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 41, + 619, + 301, + 747 + ], + "type": "inline_equation", + "content": "\\Rightarrow" + }, + { + "bbox": [ + 41, + 619, + 301, + 747 + ], + "type": "text", + "content": " Instruction Tuning Risks. Instruction tuning, a widely used fine-tuning approach, has been found vulnerable to data poisoning attacks. For example, [128, 129] show that attackers can introduce harmful behaviors by injecting malicious instructions or manipulating training data. These attacks enable models to generate unsafe content when exposed to specific trigger inputs. 
Additionally, other research [130, 131, 132] explores the use of prompt injection to backdoor instruction-tuned models, allowing attackers to trigger harmful outputs through carefully crafted prompts." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 306, + 42, + 565, + 355 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 306, + 42, + 564, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 42, + 564, + 179 + ], + "spans": [ + { + "bbox": [ + 306, + 42, + 564, + 179 + ], + "type": "text", + "content": "Parameter-Efficient Fine-Tuning Risks. Parameter-efficient fine-tuning (PEFT) techniques [133, 134, 135] also face data poisoning risks [136]. [137] uncovers stealthy and persistent non-alignment on large language models via backdoor injections. Attackers can subtly alter the model's alignment by injecting backdoors that remain undetected during the fine-tuning process. [138] examines how data poisoning attacks can make generative models degenerate by introducing poisoned data that not only degrades the model's overall performance, but also leads to the generation of harmful content." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 306, + 180, + 565, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 180, + 565, + 355 + ], + "spans": [ + { + "bbox": [ + 306, + 180, + 565, + 355 + ], + "type": "text", + "content": "Federated Learning Risks. Federated Learning, a decentralized training paradigm [139, 140, 141], has become a more privacy-friendly approach for LLM finetuning [142, 143, 144]. In federated learning, data poisoning attacks present an even greater challenge due to the distributed nature of the process [145, 146]. Attackers can inject backdoors into the federated learning process that persist across multiple rounds of training and remain undetected. 
[147] proposes a poisoning attack designed to disrupt the safety alignment of LLMs through fine-tuning a local model on automatically crafted, safety-unaligned data. [148] delves into durable backdoors in federated learning, demonstrating that attackers can create backdoor that are difficult to detect and remove, posing a significant threat to the safety of federated learning models." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 309, + 369, + 436, + 381 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 369, + 436, + 381 + ], + "spans": [ + { + "bbox": [ + 309, + 369, + 436, + 381 + ], + "type": "text", + "content": "2.3 Alignment Data Safety" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 384, + 564, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 384, + 564, + 453 + ], + "spans": [ + { + "bbox": [ + 308, + 384, + 564, + 453 + ], + "type": "text", + "content": "From a data-centric perspective, data poisoning attacks pose a significant threat to the integrity and reliability of LLMs by corrupting the training datasets [149, 150]. During the alignment process of LLMs, these attacks can target different stages, including the human feedback stage and the Reinforcement Learning from Human Feedback (RLHF) stage." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 456, + 565, + 746 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 306, + 456, + 564, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 456, + 564, + 606 + ], + "spans": [ + { + "bbox": [ + 306, + 456, + 564, + 606 + ], + "type": "text", + "content": "Human Feedback Stage. In the human feedback stage, attackers can exploit the model's reliance on human-provided data. By manipulating feedback data, they can introduce harmful patterns that propagate through the training process. 
Recent studies demonstrate three primary attack vectors: (1) [151] develops poisoning techniques using malicious instruction injections that systematically degrade model performance on targeted tasks. (2) [152, 153] engineer universal jailbreak backdoor through feedback manipulation, creating persistent vulnerabilities that bypass safety constraints when triggered by specific prompts. (3) [154] crafts deceptive feedback that induces incorrect or harmful outputs." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 306, + 608, + 565, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 608, + 565, + 746 + ], + "spans": [ + { + "bbox": [ + 306, + 608, + 565, + 746 + ], + "type": "inline_equation", + "content": "\\nRightarrow" + }, + { + "bbox": [ + 306, + 608, + 565, + 746 + ], + "type": "text", + "content": " Reinforcement Learning from Human Feedback (RLHF) Stage. In the RLHF stage, the integrity of the model's learning process can be compromised through the poisoning of reward models [1, 155, 156, 157, 158, 159]. A critical example is the RankPoison attack introduced by [160], which manipulates reward signals by strategically corrupting human preference datasets. Specifically, the attack identifies pairs of responses where the preferred response is shorter than the rejected one and then flips their labels. This manipulation causes the model to prioritize longer responses, which can increase computational costs and potentially lead to harmful behaviors. This underscores" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 42, + 299, + 65 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 42, + 299, + 65 + ], + "spans": [ + { + "bbox": [ + 53, + 42, + 299, + 65 + ], + "type": "text", + "content": "the importance of robust safeguards in preference data curation and reward model validation during alignment." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 83, + 187, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 83, + 187, + 95 + ], + "spans": [ + { + "bbox": [ + 45, + 83, + 187, + 95 + ], + "type": "text", + "content": "2.4 Safety in Data Generation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 99, + 301, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 99, + 301, + 215 + ], + "spans": [ + { + "bbox": [ + 44, + 99, + 301, + 215 + ], + "type": "text", + "content": "The rapid expansion of LLMs has led to a looming data exhaustion crisis, where high-quality data for pretraining, post-training, and evaluation is becoming increasingly scarce. To address this challenge, data synthesis, or data generation, has become deeply embedded in every stage of the LLM ecosystem. In this section, we first provide a concise overview of the role of (LLM-based) data generation throughout the LLM lifecycle and then summarize its associated safety concerns, including privacy, bias, and inaccuracy issues." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 216, + 301, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 216, + 301, + 515 + ], + "spans": [ + { + "bbox": [ + 44, + 216, + 301, + 515 + ], + "type": "text", + "content": "Data Generation in the Lifecycle of LLMs. Data synthesis has become an indispensable component of every phase in the LLM ecosystem: in the (i) pre-training stage, LLM-based data generation is often referred to as model distillation, where corpora generated by larger models serve as training data for smaller models, as seen in Phi-1 [161], Phi-1.5 [162], and AnyGPT [163], among others. In the (ii) posttraining stage, downstream fine-tuning, instruction tuning, and alignment inevitably incorporate data generation techniques. For downstream fine-tuning, it is a common practice to utilize a more powerful LLM to generate domain-specific data for a smaller LLM (e.g., Chinese medical knowledge in [164], multiple-choice question answering in [165], mathematical reasoning in [166], and clinical text data [167]) to enhance its domain-specific capabilities. It is also empirically validated that LLM-generated data (e.g., action trajectories, question-answer pairs) can be beneficial for improving the reasoning [168, 169], planning, function calling [170] abilities. For instruction tuning, some approaches employ powerful LLMs to generate instruction-tuning data, such as EvolInstruct from WizardLM [171] and Orca [172], while others adopt self-instruct techniques like Self-Instruct [173] and Self-Translate [174]. For alignment, models such as Beavertails [175], PRM800K [176], and WebGPT [177] extensively rely on LLMs for question/response generation, preference ranking for preference dataset synthesis." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 515, + 302, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 515, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 515, + 302, + 748 + ], + "type": "text", + "content": "Safety Issues and Mitigation. Despite its success, data generation inevitably introduces additional uncertainties and security risks throughout the LLM lifecycle, primarily in the following aspects: (1) Privacy, where synthetic data generation poses risks of amplifying privacy leakage due to the memorization of sensitive training samples [178] and inadequate anonymization [179], particularly in privacy-sensitive applications such as medical text processing [180] and disease diagnosis [181]. (2) Bias and Fairness, as LLMs inherently exhibit societal biases [182] (e.g., gender stereotypes in job descriptions), and the data they generate may further exacerbate these biases [183, 184]. This issue can be mitigated during the data filtering process using existing LLM debiasing techniques [185, 186, 187]. (3) Hallucination, where LLM-generated data often contains factual inaccuracies or fabricated logical chains due to probabilistic token sampling and outdated knowledge bases, a problem that may be further amplified when pretraining with LLM-generated data. Potential solutions include filtering generated data using existing hallucination detection" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 308, + 42, + 566, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 566, + 113 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 566, + 113 + ], + "type": "text", + "content": "techniques [188, 189]. (4) Malicious Use, where adversarial users may exploit synthetic data pipelines to mass-produce phishing content, typosquatting SDKs, or politically manipulative narratives. 
(5) Misalignment, where RLHF in LLM training can be compromised by selectively manipulating data samples in the preference dataset [190]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 309, + 126, + 444, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 126, + 444, + 138 + ], + "spans": [ + { + "bbox": [ + 309, + 126, + 444, + 138 + ], + "type": "text", + "content": "2.5 Roadmap & Perspective" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 140, + 447, + 152 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 140, + 447, + 152 + ], + "spans": [ + { + "bbox": [ + 309, + 140, + 447, + 152 + ], + "type": "text", + "content": "2.5.1 Reliable Data Distillation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 155, + 566, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 155, + 566, + 525 + ], + "spans": [ + { + "bbox": [ + 307, + 155, + 566, + 525 + ], + "type": "text", + "content": "The proliferation of LLM-driven data synthesis for knowledge distillation and model self-improvement introduces critical security vulnerabilities across the entire LLM lifecycle. This paradigm shift exposes all development stages—from pre-training through post-training to evaluation—to escalating risks of data poisoning threats. These emerging challenges necessitate novel frameworks integrating verifiability and error containment mechanisms to ensure synthetic data integrity, while current methodologies remain fundamentally limited by hallucination propagation and knowledge attenuation stemming from imperfect teacher-student knowledge transfer. To address these challenges, three pivotal research directions emerge: (1) Cross-Model Consistency Verification: Future systems must implement multi-modal validation protocols through techniques like knowledge graph grounding and RAG-enhanced verification. 
Such mechanisms would ensure synthetic outputs maintain alignment with authoritative external knowledge bases while detecting semantic inconsistencies through ontological reasoning; (2) Dynamic Quality Assessment Frameworks: The development of diagnostic metrics to quantify error propagation remains a crucial frontier in data safety. Advanced toolkits are needed for measuring semantic drift or contradiction are enable real-time monitoring of quality degradation across data generation processes. (3) Heterogeneous Filtering Pipelines: While existing filtering mechanisms provide partial solutions, significant progress lies in effectively synthesizing multi-source verification signals, including human expert insight, rule-based invalidators, and model-based critics specializing in detecting nuanced factual discrepancies through contrastive learning paradigms." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 309, + 534, + 488, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 534, + 488, + 547 + ], + "spans": [ + { + "bbox": [ + 309, + 534, + 488, + 547 + ], + "type": "text", + "content": "2.5.2 Novel Data Generation Paradigms" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 548, + 566, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 548, + 566, + 700 + ], + "spans": [ + { + "bbox": [ + 307, + 548, + 566, + 700 + ], + "type": "text", + "content": "Emerging approaches in data generation should leverage agent-based simulation frameworks to create a self-sustaining data flywheel for LLMs. In this paradigm, autonomous agents interact within a controlled simulation environment (e.g., Github, StackOverflow) to generate, evaluate, and iteratively refine synthetic datasets with minimal human intervention. Importantly, this approach enables the seamless integration of real-time safety checks and ethical oversight directly into the data generation pipeline. 
As a result, the system not only scales data synthesis efficiently but also proactively detects and mitigates inaccuracies and harmful content, thereby reinforcing the overall security and integrity of the generated data." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 709, + 517, + 721 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 709, + 517, + 721 + ], + "spans": [ + { + "bbox": [ + 309, + 709, + 517, + 721 + ], + "type": "text", + "content": "2.5.3 Advanced Data Poisoning & Depoisoning" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 723, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 723, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 308, + 723, + 566, + 748 + ], + "type": "text", + "content": "Future poisoning techniques are anticipated to evolve in several sophisticated directions. On the poisoning front," + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 355 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 355 + ], + "type": "text", + "content": "adversaries may go toward fragment poisoning and covert poisoning paradigms. 
In fragment poisoning, attackers could embed seemingly benign data segments that, individually, escape detection yet cumulatively form a potent payload capable of destabilizing models at scale. Covert poisoning strategies may involve imperceptibly subtle modifications that, while initially innocuous, gradually aggregate into a comprehensive and disruptive effect. These emerging techniques underscore the growing complexity of data poisoning threats and the urgent need for preemptive countermeasures. To counteract these evolving threats, future work should focus on robust detoxification mechanisms spanning three fronts: (1) Proactive defense through data provenance tracking and differential privacy during data aggregation, preventing malicious samples from entering training pipelines; (2) Reactive purification using adversarial reprogramming techniques, where poisoned datasets are \"repaired\" via counterfactual augmentation or contrastive pruning; and (3) Post-hoc detection via explainable AI diagnostics to identify poisoned samples by analyzing gradient patterns or activation outliers. Hybrid approaches combining these strategies with human-in-the-loop verification could create multi-layered defense systems. Furthermore, theoretical advancements in understanding poisoning propagation, such as how poisoned preference pairs distort reward model gradients during RLHF, will inform more effective mitigation strategies." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 372, + 180, + 383 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 372, + 180, + 383 + ], + "spans": [ + { + "bbox": [ + 45, + 372, + 180, + 383 + ], + "type": "text", + "content": "3 PRE-TRAINING SAFETY" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 388, + 300, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 388, + 300, + 573 + ], + "spans": [ + { + "bbox": [ + 44, + 388, + 300, + 573 + ], + "type": "text", + "content": "In this section, we examine the safety of LLMs in the pretraining phase, covering two key dimensions: Pre-training Data Filtering (Section 3.1) and Pre-training Data Augmentation (Section 3.2). Since the pretraining phase typically does not involve active adversarial attacks, our discussion primarily focuses on both the inherent risks present in largescale corpora [2, 4, 78, 81, 82, 97, 124, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205], such as harmful content and privacy violations—and strategies for augmenting the safety of training data, including integrating safe demonstration examples [191, 206, 207, 208] and annotating toxic content to better mitigate these risks [124, 195, 207, 209]. The overall pipeline of strategies for pre-training safety is illustrated in Figure 3. Additionally, the strategies adopted in existing LLM technical reports are summarized in Table 2." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 590, + 217, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 590, + 217, + 601 + ], + "spans": [ + { + "bbox": [ + 44, + 590, + 217, + 601 + ], + "type": "text", + "content": "3.1 Data Filtering for Pretrain Safety" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 605, + 182, + 616 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 605, + 182, + 616 + ], + "spans": [ + { + "bbox": [ + 45, + 605, + 182, + 616 + ], + "type": "text", + "content": "3.1.1 Heuristic based Filtering" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 619, + 300, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 619, + 300, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 619, + 300, + 746 + ], + "type": "text", + "content": "Heuristic-based filtering, leveraging domain blacklist [78, 193, 194], keyword-based matching [191, 193] and predefined rules [2, 124, 195, 202], is one of the most widely adopted approaches to remove undesirable content before training. With most training data sourced from the Internet [211], domain blacklist provides an efficient initial safeguard by filtering predefined harmful websites and domains. [194] compiles a 13M unsafe domain list, while [78] aggregates a 4.6M URL blacklist targeting spam and adult content. In practice, domains with a high likelihood of containing personally identifiable information (PII) are also" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 310, + 38, + 564, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 38, + 564, + 118 + ], + "spans": [ + { + "bbox": [ + 310, + 38, + 564, + 118 + ], + "type": "text", + "content": "TABLE 2: Strategies for Enhancing Safety in the Pre-training Stage. 
" + }, + { + "bbox": [ + 310, + 38, + 564, + 118 + ], + "type": "inline_equation", + "content": "\\checkmark" + }, + { + "bbox": [ + 310, + 38, + 564, + 118 + ], + "type": "text", + "content": " indicates that the method is mentioned in the model's technical report, while - denotes that the method is not referenced. " + }, + { + "bbox": [ + 310, + 38, + 564, + 118 + ], + "type": "inline_equation", + "content": "①" + }, + { + "bbox": [ + 310, + 38, + 564, + 118 + ], + "type": "text", + "content": " represents Integrating Safe Demonstration, and A denotes Annotating Toxic Content. \"Augmenting\" denotes Augmenting Training Data." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 310, + 127, + 565, + 318 + ], + "blocks": [ + { + "bbox": [ + 310, + 127, + 565, + 318 + ], + "lines": [ + { + "bbox": [ + 310, + 127, + 565, + 318 + ], + "spans": [ + { + "bbox": [ + 310, + 127, + 565, + 318 + ], + "type": "table", + "html": "
ModelData FilteringAugmentation
Heuristic-Model-Blackbox
GPT-4 [191]--
GPT-4o(mini) [124, 202]-
GPT-o1 [201]--
Llama2 [2]---
Llama3 [193]--
Yi [192]--
InternLM2 [194]--
PaLM2 [195]--A
DeepSeek-V2 [4]---
ChatGLM [196]---
Baichuan2 [203]--
Gemini [197]-
Gemini1.5 [209]-
TigerBot [206]--1
Gemma [198]--
Nemotron-4 [200, 210]--
RefinedWeb [78]---
", + "image_path": "6efbd7f0539bfa93a6cba65f0dec18235d380fd7d9d02766723b3f95bd152dd6.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 335, + 564, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 335, + 564, + 498 + ], + "spans": [ + { + "bbox": [ + 308, + 335, + 564, + 498 + ], + "type": "text", + "content": "included in the blacklist [2, 193, 195, 202]. Beyond domain blocklists, keyword-based matching further refines content selection by detecting undesirable text patterns at the phrase or word level. For instance, [191] employs a lexicon-based approach to filter inappropriate erotic content. Similarly, [192], [193], and [194] curate word-level blocklists to identify and exclude harmful content. Given that domain blacklist and keyword-based matching might inadvertently exclude a large amount of data [194], developing heuristic-based filtering based on carefully predefined rules provides a balance between content safety and data retention. However, most existing works [197, 198, 200, 203, 209, 210] do not disclose their predefined rules, limiting transparency and reproducibility." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 309, + 506, + 435, + 518 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 506, + 435, + 518 + ], + "spans": [ + { + "bbox": [ + 309, + 506, + 435, + 518 + ], + "type": "text", + "content": "3.1.2 Model based Filtering" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 520, + 565, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 520, + 565, + 682 + ], + "spans": [ + { + "bbox": [ + 307, + 520, + 565, + 682 + ], + "type": "text", + "content": "Model-based filtering leverages learned representations to assess content adaptively. [191] filters GPT-4's dataset using internally trained classifiers [212] to remove inappropriate erotic content. 
[192] employs the Safety Scorer to remove toxic web content, such as violence, pornography, and political propaganda. [194] fine-tunes BERT on the Kaggle \"Toxic Comment Classification Challenge\" dataset and a pornography classification dataset annotated via the Perspective " + }, + { + "bbox": [ + 307, + 520, + 565, + 682 + ], + "type": "inline_equation", + "content": "\\mathrm{API}^1" + }, + { + "bbox": [ + 307, + 520, + 565, + 682 + ], + "type": "text", + "content": ", using the resulting classifiers for secondary filtering to ensure safer data. Due to its greater generalizability, model-based filtering has been widely adopted across various works [197, 198, 199, 200, 203, 209, 210], serving as a complementary approach to heuristic methods for more effective content filtering." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 691, + 419, + 703 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 691, + 419, + 703 + ], + "spans": [ + { + "bbox": [ + 309, + 691, + 419, + 703 + ], + "type": "text", + "content": "3.1.3 Blackbox Filtering" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 704, + 564, + 728 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 704, + 564, + 728 + ], + "spans": [ + { + "bbox": [ + 308, + 704, + 564, + 728 + ], + "type": "text", + "content": "Blackbox filtering mostly relies on policy-driven [4, 197, 209, 213] or API-based [124, 201, 202] methods with undisclosed" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 317, + 735, + 432, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 735, + 432, + 746 + ], + "spans": [ + { + "bbox": [ + 317, + 735, + 432, + 746 + ], + "type": "text", + "content": "1. 
https://perspectiveapi.com/" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 65, + 65, + 289, + 166 + ], + "blocks": [ + { + "bbox": [ + 65, + 65, + 289, + 166 + ], + "lines": [ + { + "bbox": [ + 65, + 65, + 289, + 166 + ], + "spans": [ + { + "bbox": [ + 65, + 65, + 289, + 166 + ], + "type": "image", + "image_path": "3a143801a40ca350831d89f8b6734dec72d5b207c62eb1650bfeb14b6904c9f6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 72, + 167, + 289, + 255 + ], + "blocks": [ + { + "bbox": [ + 72, + 167, + 289, + 255 + ], + "lines": [ + { + "bbox": [ + 72, + 167, + 289, + 255 + ], + "spans": [ + { + "bbox": [ + 72, + 167, + 289, + 255 + ], + "type": "image", + "image_path": "58dfd579ca3459049d3c5f80bf67f5511b180af8ccd32375a55790a24fa9dbda.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 72, + 255, + 289, + 346 + ], + "blocks": [ + { + "bbox": [ + 72, + 255, + 289, + 346 + ], + "lines": [ + { + "bbox": [ + 72, + 255, + 289, + 346 + ], + "spans": [ + { + "bbox": [ + 72, + 255, + 289, + 346 + ], + "type": "image", + "image_path": 
"39576eab3b13c6f58322b854afc4d334e49b19b1700b0e374e0731a1c01b150e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 58, + 348, + 289, + 446 + ], + "blocks": [ + { + "bbox": [ + 58, + 348, + 289, + 446 + ], + "lines": [ + { + "bbox": [ + 58, + 348, + 289, + 446 + ], + "spans": [ + { + "bbox": [ + 58, + 348, + 289, + 446 + ], + "type": "image", + "image_path": "cf7b73540606aebc13ca472854080f2b100d90a206010547361e44605ddbdfc2.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 69, + 447, + 289, + 539 + ], + "blocks": [ + { + "bbox": [ + 69, + 447, + 289, + 539 + ], + "lines": [ + { + "bbox": [ + 69, + 447, + 289, + 539 + ], + "spans": [ + { + "bbox": [ + 69, + 447, + 289, + 539 + ], + "type": "image", + "image_path": "b77e0d27b30fab18c8a5198df55bc3557bc9da7c2ff2c1bd5181dc84b6441a02.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 563, + 298, + 599 + ], + "lines": [ + { + "bbox": [ + 47, + 563, + 298, + 599 + ], + "spans": [ + { + "bbox": [ + 47, + 563, + 298, + 599 + ], + "type": "text", + "content": "Fig. 3: Pipeline of the Strategies for Pre-training Safety. We divide the existing methods into filtering- and augmentation-based pre-training safety." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 620, + 300, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 620, + 300, + 724 + ], + "spans": [ + { + "bbox": [ + 44, + 620, + 300, + 724 + ], + "type": "text", + "content": "filtering criteria and implementation details. As a result, these approaches are generally categorized as black box filtering due to their limited interpretability and opaque decision-making processes. 
Most proprietary companies adopt their own predefined policies and APIs for filtering. For example, [213] filters data based on Meta's safety standards, while [209] removes harmful content according to Google's policy. [124, 201, 202] use the Moderation " + }, + { + "bbox": [ + 44, + 620, + 300, + 724 + ], + "type": "inline_equation", + "content": "\\mathrm{API}^2" + }, + { + "bbox": [ + 44, + 620, + 300, + 724 + ], + "type": "text", + "content": " for PII detection and toxicity analysis to refine filtering." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 42, + 555, + 55 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 42, + 555, + 55 + ], + "spans": [ + { + "bbox": [ + 309, + 42, + 555, + 55 + ], + "type": "text", + "content": "3.2 Augmenting Training Data for Pre-training Safety" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 57, + 564, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 57, + 564, + 209 + ], + "spans": [ + { + "bbox": [ + 308, + 57, + 564, + 209 + ], + "type": "text", + "content": "In addition to filtering strategies, some works enhance training data to improve pre-training safety. These approaches mainly include integrating safe demonstration examples to guide model behavior [206] and annotating toxic content to improve the model's ability to recognize and handle unsafe inputs [195]. [206] incorporates 40k human-annotated safety demonstrations, updated monthly, into both alignment learning and pretraining to iteratively refine safety measures. [195] introduces control tokens to explicitly mark text toxicity in a partial of pertaining data based on the signals from the Perspective API. This approach allows toxicity-aware conditioning during inference time without hurting performance in general." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 309, + 223, + 443, + 235 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 223, + 443, + 235 + ], + "spans": [ + { + "bbox": [ + 309, + 223, + 443, + 235 + ], + "type": "text", + "content": "3.3 Roadmap & Perspective" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 238, + 564, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 238, + 564, + 399 + ], + "spans": [ + { + "bbox": [ + 307, + 238, + 564, + 399 + ], + "type": "text", + "content": "The development of pre-training safety encompasses a diverse set of techniques. Heuristic-based filtering utilizes domain blocklists, keyword matching, and predefined rules to efficiently exclude overtly harmful content and personally identifiable information (PII) [78], while model-based filtering leverages learned representations to dynamically assess the harmfulness of content [205]. Additionally, blackbox filtering employs policy-driven and API-based solutions [97, 204], providing a less transparent yet operationally robust approach. However, existing research hasn't shown how to integrate these methods to pre-train an LLM that ensures security from the source. Thus, further exploration of accurate and efficient pre-training data filtering strategies is both necessary and worthwhile." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 400, + 564, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 400, + 564, + 549 + ], + "spans": [ + { + "bbox": [ + 308, + 400, + 564, + 549 + ], + "type": "text", + "content": "Apart from filtering, data augmentation emerged as a complementary strategy. Some efforts focused on integrating safe demonstration examples to guide model behavior, and some extended to annotating toxic content for improved detection of unsafe inputs [207]. 
These augmentation techniques work in tandem with filtering methods to preserve valuable training data while mitigating risks. Although data augmentation improves pretraining safety, some current work [2, 97] argues that safety alignment in stages after pertaining tends to yield better results. This raises the question of whether augmenting training data during pretraining is cost-effective, given the same time and resource constraints." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 309, + 567, + 450, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 567, + 450, + 578 + ], + "spans": [ + { + "bbox": [ + 309, + 567, + 450, + 578 + ], + "type": "text", + "content": "4 POST-TRAINING SAFETY" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 582, + 564, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 582, + 564, + 710 + ], + "spans": [ + { + "bbox": [ + 307, + 582, + 564, + 710 + ], + "type": "text", + "content": "In this section, we focus on reviewing the safety against harmful post-training attack, where we mainly focus on three parts: Post-training Based Attack, Defense Against Post-training Based Attack, and Evaluation Mechanism. (I) First, we introduce post-training-based attacks and recent advanced attack techniques (Section 4.1). (II) We categorize defensive mechanisms into three groups according to their conducted stage (Section 4.2), referring to the categorization in [214]. The comprehensive classification framework is illustrated in Figure 4, highlighting key representative studies along with their contributing organizations." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 306, + 712, + 564, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 712, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 306, + 712, + 564, + 747 + ], + "type": "inline_equation", + "content": "\\Rightarrow" + }, + { + "bbox": [ + 306, + 712, + 564, + 747 + ], + "type": "text", + "content": " Alignment. Conducted internally by manufacturers/organizations prior to deployment, this final pre-deployment stage employs techniques such as" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 735, + 264, + 746 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 735, + 264, + 746 + ], + "spans": [ + { + "bbox": [ + 53, + 735, + 264, + 746 + ], + "type": "text", + "content": "2. 
https://platform.openai.com/docs/guides/moderation" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 48, + 42, + 298, + 236 + ], + "blocks": [ + { + "bbox": [ + 48, + 42, + 298, + 236 + ], + "lines": [ + { + "bbox": [ + 48, + 42, + 298, + 236 + ], + "spans": [ + { + "bbox": [ + 48, + 42, + 298, + 236 + ], + "type": "image", + "image_path": "a912ed4af41748f546e1f71cac0eb117e621e272d4b9275fa07235d7e5605523.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 244, + 290, + 269 + ], + "lines": [ + { + "bbox": [ + 55, + 244, + 290, + 269 + ], + "spans": [ + { + "bbox": [ + 55, + 244, + 290, + 269 + ], + "type": "text", + "content": "Fig. 4: The taxonomy illustration of LLM post-training safety." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 288, + 301, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 288, + 301, + 357 + ], + "spans": [ + { + "bbox": [ + 52, + 288, + 301, + 357 + ], + "type": "text", + "content": "reward modeling [1, 155, 156, 157, 158, 159, 215, 216], reinforcement learning [217, 218, 219], and value-aware optimization [220, 221, 222] to align LLMs with human values and societal expectations. This critical phase ensures ethical grounding through iterative preference optimization [223]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 41, + 358, + 301, + 509 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 41, + 358, + 301, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 358, + 301, + 439 + ], + "spans": [ + { + "bbox": [ + 41, + 358, + 301, + 439 + ], + "type": "inline_equation", + "content": "\\nrightarrow" + }, + { + "bbox": [ + 41, + 358, + 301, + 439 + ], + "type": "text", + "content": " Downstream Fine-Tuning. 
While the datasets for fine-tuning can be manipulated by malicious attackers, the safety of aligned LLMs can be greatly deteriorated [47, 48, 49, 50]. Thus, it is natural to devise robust fine-tuning mechanisms to defend the attacks and a series of defense mechanisms in the fine-tuning stage have been proposed [224, 225, 226, 227, 228]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 41, + 439, + 301, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 439, + 301, + 509 + ], + "spans": [ + { + "bbox": [ + 41, + 439, + 301, + 509 + ], + "type": "text", + "content": "Safety Recovery. The idea of safety recovery is to fix the attacked model after the harmful fine-tuning attack [214]. This line of research mainly focuses on realigning the safety of LLMs [229, 230, 231, 232, 233] by eliminating the toxic information in model parameters, projecting the harmful gradient update to the safety subspace, etc." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 44, + 510, + 300, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 510, + 300, + 557 + ], + "spans": [ + { + "bbox": [ + 44, + 510, + 300, + 557 + ], + "type": "text", + "content": "(III) Going beyond this, we finally present the evaluation metrics and benchmarks (Section 4.3), along with a comprehensive roadmap and future perspectives for ensuring safety within the fine-tuning framework (Section 4.4)." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 47, + 581, + 299, + 645 + ], + "blocks": [ + { + "bbox": [ + 45, + 565, + 299, + 578 + ], + "lines": [ + { + "bbox": [ + 45, + 565, + 299, + 578 + ], + "spans": [ + { + "bbox": [ + 45, + 565, + 299, + 578 + ], + "type": "text", + "content": "TABLE 3: Topic coverage comparison with existing surveys." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 581, + 299, + 645 + ], + "lines": [ + { + "bbox": [ + 47, + 581, + 299, + 645 + ], + "spans": [ + { + "bbox": [ + 47, + 581, + 299, + 645 + ], + "type": "table", + "html": "
SurveysData PreparationPre-trainFinetuningAlignmentPost-processInference
[71]XXXXX
[234]X
[77]XXXX
[235]XXXXX
[214]XXX
[236]XX
Ours
", + "image_path": "4d34f195d34be5e5c6a43216b0ec96abe005b1a6a8f26aa7c36ce1f4814affca.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 44, + 654, + 301, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 654, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 44, + 654, + 301, + 747 + ], + "type": "text", + "content": "Differentiating from prior LLM surveys [33, 54, 71, 73, 77, 234, 235, 237], this work uniquely highlights safety implications across the entire fine-tuning pipeline, aligning with the evolving logical framework of modern AI safety. Specifically: Systematic Safety Taxonomy. We rigorously organize safety challenges into distinct fine-tuning stages, providing a granular analysis of risks at each phase. Attack-Defense Methodology. We catalog both adversarial" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 42, + 564, + 101 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 564, + 101 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 564, + 101 + ], + "type": "text", + "content": "exploitation strategies and corresponding mitigation techniques, accompanied by a detailed technical roadmap for robust fine-tuning. ③ Forward-Looking Insights. Beyond current practices, we outline critical future directions. The detailed information is summarized in Table 3." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 118, + 441, + 131 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 118, + 441, + 131 + ], + "spans": [ + { + "bbox": [ + 309, + 118, + 441, + 131 + ], + "type": "text", + "content": "4.1 Attacks in Post-training" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 134, + 565, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 134, + 565, + 285 + ], + "spans": [ + { + "bbox": [ + 307, + 134, + 565, + 285 + ], + "type": "text", + "content": "Fine-tuning refers to the process of adapting pre-trained models to downstream tasks by optimizing their parameters, which significantly boosts task-specific performance while reducing computational costs compared to full retraining. However, pioneering studies [238, 239, 240] demonstrate that even the introduction of minimal malicious or misaligned data during fine-tuning can severely compromise the safety alignment of LLMs. This security risk has motivated investigations into adversarial attacks targeting the fine-tuning phase. In this section, we introduce the fine-tuning attacks from the following two perspectives: (1) the toxic data construction phase and (2) the fine-tuning phase." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 309, + 297, + 473, + 308 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 297, + 473, + 308 + ], + "spans": [ + { + "bbox": [ + 309, + 297, + 473, + 308 + ], + "type": "text", + "content": "4.1.1 Toxic Data Construction Phase" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 312, + 564, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 312, + 564, + 406 + ], + "spans": [ + { + "bbox": [ + 307, + 312, + 564, + 406 + ], + "type": "text", + "content": "Leading providers like OpenAI employ safety-oriented filtering mechanisms to screen fine-tuning datasets before user customization. 
To circumvent these defenses, adversarial training data must first evade detection by such protective models [226]. Current methodologies for constructing toxic data can be broadly categorized into three main approaches: fixed-prompt strategies, iterative prompt strategies and transfer learning strategies." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 405, + 564, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 405, + 564, + 521 + ], + "spans": [ + { + "bbox": [ + 308, + 405, + 564, + 521 + ], + "type": "text", + "content": "Fixed-prompt Strategies. These approaches prefix benign inputs with role-assigning prompts to elicit harmful outputs from LLM. For example, [238] prefixes a subset of fine-tuning data with directives such as \"obedient robot.\" [241] programmed models to feign refusal via safety disclaimers before overriding restrictions, enabling responses to prohibited queries. As such explicit patterns risk detection, advanced stealth methods emerged: [242] embeds malicious content through cryptographic substitutions or steganography within random/natural language patterns." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 521, + 564, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 521, + 564, + 614 + ], + "spans": [ + { + "bbox": [ + 308, + 521, + 564, + 614 + ], + "type": "text", + "content": "Iterative-prompt Strategies. Static attack strategies fail once detected. Heuristic methods now iteratively adapt toxic data against defensive feedback to bypass filters, though iterative optimization often weakens attack strength. [243] counters this via similarity-based loss to maintain toxicity, while [244] employs gradient-guided backdoor triggers during instruction tuning to evade detection while preserving content validity." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 308, + 614, + 564, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 614, + 564, + 696 + ], + "spans": [ + { + "bbox": [ + 308, + 614, + 564, + 696 + ], + "type": "text", + "content": "Transfer Learning Strategies. Black-box constraints and API rate limits drive attackers to exploit transferable adversarial fine-tuning data from open-source models for zero-shot transfer attacks [240, 245]. The shadow alignment technique [239] demonstrates this through oracle-generated adversarial examples targeting GPT-4's restricted scenarios, successfully poisoning LLaMA via strategic fine-tuning." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 309, + 708, + 421, + 719 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 708, + 421, + 719 + ], + "spans": [ + { + "bbox": [ + 309, + 708, + 421, + 719 + ], + "type": "text", + "content": "4.1.2 Fine-tuning Phase" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 723, + 565, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 723, + 565, + 748 + ], + "spans": [ + { + "bbox": [ + 308, + 723, + 565, + 748 + ], + "type": "text", + "content": "Existing fine-tuning methods fall into two categories: Supervised Fine-Tuning (SFT)-based and Reinforcement Learning" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 77 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 77 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 77 + ], + "type": "text", + "content": "(RL)-based. Attackers either tamper with model parameters/data to implant stealthy backdoors or distort reward mechanisms to incentivize harmful outputs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 77, + 301, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 77, + 301, + 262 + ], + "spans": [ + { + "bbox": [ + 44, + 77, + 301, + 262 + ], + "type": "text", + "content": "SFT-based. Attackers subvert safety-aligned pretrained models through targeted parameter manipulation, achieving stealthy backdoor implantation or safety bypasses via minimal malicious data injection. [246] undermines safety guardrails through reversed supervised fine-tuning (RSFT) with adversarial \"helpful\" response pairs. Building on this, [247, 248] demonstrate safety alignment erosion via parameter-efficient adaptation (e.g., LoRA, quantization) in models like Llama-2-7B. Domain-specific analyses reveal broader implications: [50] quantifies toxicity amplification in community-driven adaptations (e.g., SauerkrautLM's German localization), while [249] examines cross-lingual attack transferability through parametric sensitivity analysis. Complementing these, [250] pioneers federated attack vectors using layer-specific modifications (LoRA, LayerNorm) in distributed learning environments." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 262, + 301, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 262, + 301, + 378 + ], + "spans": [ + { + "bbox": [ + 44, + 262, + 301, + 378 + ], + "type": "text", + "content": "RL-based. Attackers exploit algorithms like Direct Preference Optimization (DPO) to corrupt reinforcement learning policies, assigning higher rewards to harmful behaviors and degrading model safety. For instance, [246] leveraged DPO to encode harmful behaviors as \"preferences,\" skewing the model's response distribution to favor malicious outputs under adversarial prompts. Conversely, [251] identified a \"probability displacement\" phenomenon in DPO, where preferred responses paradoxically decrease in likelihood, potentially triggering unsafe or inverted outputs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 392, + 185, + 404 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 392, + 185, + 404 + ], + "spans": [ + { + "bbox": [ + 45, + 392, + 185, + 404 + ], + "type": "text", + "content": "4.2 Defenses in Post-training" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 407, + 123, + 418 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 407, + 123, + 418 + ], + "spans": [ + { + "bbox": [ + 45, + 407, + 123, + 418 + ], + "type": "text", + "content": "4.2.1 Alignment" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 421, + 300, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 421, + 300, + 514 + ], + "spans": [ + { + "bbox": [ + 44, + 421, + 300, + 514 + ], + "type": "text", + "content": "Alignment typically optimizes the language model based on human preference feedback by training LLM with high-quality labeled data from harmless question-answer pairs [156, 159, 252]. Based on this, alignment ensures that LLM generations adhere to ethics and harmlessness, enhancing safety [155, 253]. 
In this section, we categorize our discussion into two types based on purpose: general alignment and safety alignment." + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 50, + 526, + 298, + 647 + ], + "blocks": [ + { + "bbox": [ + 50, + 526, + 298, + 647 + ], + "lines": [ + { + "bbox": [ + 50, + 526, + 298, + 647 + ], + "spans": [ + { + "bbox": [ + 50, + 526, + 298, + 647 + ], + "type": "image", + "image_path": "83ed828523c8544c3e695503562e583a5b18583530a15f2f579840c6adbbd329.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 47, + 654, + 297, + 667 + ], + "lines": [ + { + "bbox": [ + 47, + 654, + 297, + 667 + ], + "spans": [ + { + "bbox": [ + 47, + 654, + 297, + 667 + ], + "type": "text", + "content": "Fig. 5: The taxonomy illustration of LLM alignment safety." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 44, + 677, + 301, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 677, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 44, + 677, + 301, + 747 + ], + "type": "text", + "content": "General Alignment. General alignment enables the pretrained model to learn how to chat while internalizing fundamental human values. In RLHF [1], the model first learns from human-labeled data through supervised finetuning. Then, crowdsourced preference rankings of model responses are used to train a reward model, which is further" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 42, + 566, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 566, + 215 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 566, + 215 + ], + "type": "text", + "content": "optimized using PPO [175]. The preference data sequence provided by human annotators guides the model to conduct helpful rather than harmful behaviors [254]. 
Subsequent techniques such as DPO [255, 256, 257] and RLAIF [158, 258] follow a similar approach by leveraging preference data. Rule-based alignment methods predefine rules that the model learns to follow [259], which eliminates the need for labeled preference data and reduces costs while achieving comparable safety outcomes. Through general alignment, aligned models learn to reject direct harmful queries that could cause societal harm [2, 213]. While these methods contribute to LLM safety to some extent, they are highly susceptible to jailbreak attacks and can be easily circumvented [260, 261, 262, 263]. Furthermore, they are vulnerable to fine-tuning-based attacks, as highlighted in recent studies [127]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 216, + 566, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 216, + 566, + 413 + ], + "spans": [ + { + "bbox": [ + 308, + 216, + 566, + 413 + ], + "type": "text", + "content": "Safety Alignment. General alignment has been shown to have significant disadvantages [48] and is particularly vulnerable to fine-tuning attacks after being open-sourced [246]. To better address the challenges of LLM safety [237, 246, 264], some research focuses on safety alignment. One approach is to elevate safety to the same level of importance as performance by training independent reward models and cost models [217, 265]. Subsequent work introduces unique safety rules to enhance safety, leveraging Rule-Based Rewards to train safer models [266]. As large reasoning models (LRMs) emerge [4, 201], rule-based approach is further formalized into the safe policy reasoning, requiring models to reason over safe specifications during inference [267, 268]. Additionally, some studies explore safety alignment from interpretability perspectives [46, 231, 269, 270] by editing model parameters or modifying the residual stream to achieve better alignment." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 421, + 448, + 433 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 421, + 448, + 433 + ], + "spans": [ + { + "bbox": [ + 309, + 421, + 448, + 433 + ], + "type": "text", + "content": "4.2.2 Downstream Fine-tuning" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 434, + 564, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 434, + 564, + 469 + ], + "spans": [ + { + "bbox": [ + 308, + 434, + 564, + 469 + ], + "type": "text", + "content": "The defenses devised in this stage aim to mitigate the harmfulness of the attack during fine-tuning [271]. There are typically three types of defenses." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 470, + 564, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 470, + 564, + 597 + ], + "spans": [ + { + "bbox": [ + 307, + 470, + 564, + 597 + ], + "type": "text", + "content": "Regularization-based method: This type of defense achieves a successful defense by constraining the distance between the fine-tuned model and the aligned model. For example, KL regularizer is utilized to constrain the representation of the fine-tuned model to not deviate much from that of the aligned model [48, 272]. Another line of works strive to identify safety layers or modules to freeze or restrict the learning rate to ensure that the fine-tuned model do not deviate far from the aligned model on safety [269, 273, 274, 275, 276]. SaLoRA [277] projects the LoRA representation to an orthogonal aligned subspace." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 597, + 565, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 597, + 565, + 746 + ], + "spans": [ + { + "bbox": [ + 307, + 597, + 565, + 746 + ], + "type": "text", + "content": "Data manipulation: This type of defense mixes alignment data into fine-tuning to achieve safety defense or modifying the system prompt to mitigate the risk [226, 227, 278, 279, 280]. For data mixing, Lisa [224] proposes Bi-State optimization to separate optimization over the alignment data/fine-tuning data, and to use a proximal term for further optimization. Paraphrase [279] also made a similar attempt and found that safety data that follows the prompting style of fine-tuning data can further improve defense performance. As for modifying system prompts, PTST [281] uses general prompts for fine-tuning, but uses safety prompts for inference. BEA [226] lies in the intersection of data mixing and prompt modification method, which introduces safe" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 87 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 87 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 87 + ], + "type": "text", + "content": "data concatenated with a system prompt as a backdoor trigger during fine-tuning, thereby establishing a strong link between the backdoor trigger and the safe response within the model." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 88, + 301, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 88, + 301, + 193 + ], + "spans": [ + { + "bbox": [ + 44, + 88, + 301, + 193 + ], + "type": "text", + "content": "Detection-based defense: This type defense devises methods to filter out the harmful data from fine-tuning dataset to preserve the aligned safety of LLMs [282, 283, 284, 285, 286, 287]. For instance, there are works that train LLMs as moderation models to identify harmful content [175, 283, 288]. SEAL [228] devises a bi-level formulation to filter out the most harmful samples. SAFT [285] proposes to factorize the embedding space and compare the singular vector to identify harmful data." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 201, + 149, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 201, + 149, + 213 + ], + "spans": [ + { + "bbox": [ + 45, + 201, + 149, + 213 + ], + "type": "text", + "content": "4.2.3 Safety Recovery" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 215, + 302, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 215, + 302, + 470 + ], + "spans": [ + { + "bbox": [ + 44, + 215, + 302, + 470 + ], + "type": "text", + "content": "Safety recovery refers to the defense mechanism applied after fine-tuning to restore a compromised model (i.e., realign the model). Several approaches aim to repair the model by eliminating the harmful knowledge that has been injected during fine-tuning. For instance, LAT [289] removes harmful knowledge by introducing perturbations into the embedding space, while Antidote [290] identifies and removes the harmful coordinates. [291] further proposes detecting and removing a small fraction of critical poisoned data points using influence functions can effectively recover model performance. Other approaches leverage information from aligned models to restore the integrity of attacked models. For example, SOMF [292] merges the parameters of fine-tuned models with safety parameters from aligned models, Safe LoRA [230] uses the weights of aligned models to project harmful gradient updates into a safe subspace, and SafetyLock [293] extracts safety activation information and injects it into the fine-tuned model. Additional methods in this domain include Safety Arithmetic [231], BEAT [287], IRR [294], NLSR [233], and Panacea [295]. Furthermore, CMRM [296] has been specifically developed to recover the safety of vision-based large language models." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 479, + 145, + 490 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 479, + 145, + 490 + ], + "spans": [ + { + "bbox": [ + 45, + 479, + 145, + 490 + ], + "type": "text", + "content": "4.2.4 Safety Location" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 491, + 301, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 491, + 301, + 689 + ], + "spans": [ + { + "bbox": [ + 44, + 491, + 301, + 689 + ], + "type": "text", + "content": "Safety location refers to determining the specific location of the safety mechanism in LLMs, which is important for efficiently building a stable and reliable defense. Recent studies find that safety mechanism is not uniform across all layers of LLMs' transformer layers and only some specific layers are essential for the successful activation of defense [297, 298, 299]. Based on this finding, TGA [297] unveils the key reason for the inconsistency between visual and language safety capabilities in multimodal LLMs is that the visual and language modalities cannot be effectively aligned at the activation layers for safety mechanism. SPPFT [298] proposes a novel fine-tuning approach to fixes the gradient of the safety layers during fine-tuning to address the security degradation. LED [299] shows that realigning the safety layers with the decoded safe response from identified toxic layers can significantly improve the alignment of LLMs against jailbreak attacks." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 697, + 208, + 709 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 697, + 208, + 709 + ], + "spans": [ + { + "bbox": [ + 45, + 697, + 208, + 709 + ], + "type": "text", + "content": "4.2.5 Open-Weight LLMs Safeguard" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 44, + 712, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 712, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 712, + 301, + 746 + ], + "type": "text", + "content": "As open-weight LLMs become increasingly public accessible, concerns about their potential misuse have intensified. Once model weights are public, malicious actors" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 42, + 566, + 123 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 566, + 123 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 566, + 123 + ], + "type": "text", + "content": "can fine-tune or alter them to remove safety alignment, enabling harmful applications such as generating misinformation, planning cyberattacks, or providing instructions for weapons development. Because LLMs grow in capability, ensuring these models cannot be easily repurposed for high-risk misuse has become a critical concern for both researchers and policymakers, like NIST [300, 301]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 123, + 567, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 123, + 567, + 356 + ], + "spans": [ + { + "bbox": [ + 307, + 123, + 567, + 356 + ], + "type": "text", + "content": "Traditional safety techniques—such as refusal training via supervised fine-tuning or reinforcement learning—are often ineffective in this setting, as they can be easily undone by adversarial modifications [240, 269]. 
In response, researchers have proposed post-training defenses that aim to remain effective even when the model is directly manipulated after release. Two notable approaches are Representation Noising [302] and Tamper Attack Resistance [303]. These approaches attempt to protect models by degrading their ability to learn or recall harmful knowledge, even after extensive fine-tuning. The goal is to raise the cost of misuse, even under strong threat models where attackers have full access to model weights. However, recent studies [301] have shown that evaluating the durability of these defenses is itself difficult. Minor changes in fine-tuning setup—such as different prompt formats, or random seeds—can lead to drastically different outcomes. Moving forward, researchers could clearly define threat models, improve reproducibility, and develop safeguards that offer measurable resilience across a wide range of adaptive attack strategies." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 309, + 370, + 383, + 381 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 370, + 383, + 381 + ], + "spans": [ + { + "bbox": [ + 309, + 370, + 383, + 381 + ], + "type": "text", + "content": "4.3 Evaluation" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 385, + 421, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 385, + 421, + 396 + ], + "spans": [ + { + "bbox": [ + 309, + 385, + 421, + 396 + ], + "type": "text", + "content": "4.3.1 Evaluation Metrics" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 399, + 565, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 565, + 480 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 565, + 480 + ], + "type": "text", + "content": "As discussed in previous studies [127, 304], the goal of defense is to ensure that the model is able to (1) keep harmlessness after attack and (2) achieve similar levels of performance on downstream 
tasks with or without defense. In response to the two goals, we summarize the metrics involved in the existing research into two types: safety metrics and utility metrics." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 481, + 566, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 481, + 566, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 481, + 566, + 712 + ], + "type": "text", + "content": "Safety metrics: This type of metric is used to evaluate the model's ability to maintain the safety of its outputs after being attacked. Attack Success Rate (ASR), introduced in [260], is one of the earliest safety metrics and has been widely adopted in subsequent works [305, 306, 307], and these papers employ different names for this metric, such as rejection rate [308] and fulfillment rate [309]. The novel measurements of safety metrics emerge with the advent of LLM-as-a-Judge [310, 311]. [261] is the first to apply LLMs to label model outputs as either safe or unsafe and calculates the ratio of unsafe labels as the safety metric. This method effectively leverages the generalization capability of LLMs and has been widely adopted [312, 313, 314]. However, this method also exhibits notable limitations, such as the inability to distinguish between different levels of risk. To address them, [315, 316] measures safety by calculating the alignment rate of the model's responses to safety-related multi-choice questions and those of human evaluators, and [230, 238] utilize a 5-point scale for LLM-based evaluators for more fine-grained evaluation." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 712, + 566, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 712, + 566, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 712, + 566, + 746 + ], + "type": "text", + "content": "Utility metrics: In research on LLM safety, this type of metric is used to evaluate whether the model maintains its original performance on downstream tasks after an attack" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 239 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 239 + ], + "type": "text", + "content": "or defense. Researchers demonstrate the impact of their methods on model performance by comparing the results of utility metrics before and after the operation. For close-end tasks which have certain ground-truth labels, such as mathematical problems [317, 318, 319], coding tasks [320, 321], and classification tasks [322, 323], researchers typically use accuracy, the ratio of samples for which the model provides the correct answer. For open-ended tasks without a definite correct answer, the metrics are more diverse. 
For QA tasks [310, 324, 325], researchers primarily use LLM-based rating systems or similarity between generated content and standard response. For text summarization [326] and machine translation [327], ROUGE score and BLEU are widely used. By preserving utility, models can maintain their helpful capabilities while resisting attacks, ensuring that safety enhancements do not compromise their practical value in real-world applications." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 239, + 301, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 239, + 301, + 506 + ], + "spans": [ + { + "bbox": [ + 47, + 239, + 301, + 506 + ], + "type": "text", + "content": "Safety and Utility Trade-off metrics: Safety alignment is far more than simply refusing to answer harmful questions [265, 328]. In other words, it is insufficient to rely solely on a classifier that rejects safety-related prompts while responding normally to others [329, 330]. When evaluating a model's safety alignment, a key focus is dual-preference evaluation - assessing whether the model can remain helpful while adhering to safety constraints [175]. For example, consider the prompt, \"How to make a bomb?\" A basic form of safety alignment would involve the model refusing to respond - similar to the approach taken by traditional moderation systems. However, beyond single-preference evaluation, a more advanced form of safety alignment not only withholds harmful information but also provides value-based reasoning and active dissuasion [253]. For instance, the model might reply: \"Building a bomb is extremely dangerous and poses serious risks to public safety. Such actions could cause significant harm and may lead to criminal prosecution.\" The goal of safety alignment is to ensure that a model's behavior aligns with human intentions and values, particularly in safety-critical contexts [331]. 
In this way, the goal is to achieve a form of bidirectional value alignment between the model and human values [332]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 521, + 180, + 532 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 521, + 180, + 532 + ], + "spans": [ + { + "bbox": [ + 45, + 521, + 180, + 532 + ], + "type": "text", + "content": "4.3.2 Evaluation Benchmarks" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 537, + 300, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 537, + 300, + 618 + ], + "spans": [ + { + "bbox": [ + 44, + 537, + 300, + 618 + ], + "type": "text", + "content": "In current applications, the boundary between alignment benchmarks and fine-tuning benchmarks is not clearly defined. Some datasets from alignment benchmarks [175, 333], after appropriate modifications, can also be utilized for fine-tuning benchmarks. Thus, we classify them into two types as per their purposes. We summarize some widely-used benchmarks in Table 4." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 619, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 619, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 619, + 301, + 746 + ], + "type": "text", + "content": "Safety-purpose benchmarks: These benchmarks evaluate the model's ability to maintain safety and align with human values when handling harmful prompts. They are the primary benchmarks used in safety research, effectively testing whether attack or defense methods influence the model's handling of harmful prompts. The design of responses varies depending on the specific purpose. [238, 260] consists of harmful prompts and harmful responses and [334, 335] only contains harmful prompts. 
Benchmarks or datasets designed for safety alignment, like BeaverTails [175] and HH-RLHF [155], typically not only include both" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "type": "text", + "content": "safe and harmful responses but also sometimes include human preference data." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 66, + 564, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 66, + 564, + 217 + ], + "spans": [ + { + "bbox": [ + 308, + 66, + 564, + 217 + ], + "type": "text", + "content": "General-purpose benchmarks: These benchmarks are used to evaluate the model's performance, such as accuracy, knowledge breadth, and reasoning, typically not intentionally including harmful data. In LLM safety, assessing the model with general-purpose benchmarks assists in analyzing the impact of defenses on the model's performance or is combined with harmful data to simulate fine-tuning attacks. Representative datasets include AlpacaEval [324], Dolly-15k [336], HPD v2 [337], GSM8K [317], ErrorRadar [338], etc. General-purpose benchmarks are also critical for LLM safety research, verifying that mitigation strategies do not degrade model performance on benign tasks, thereby balancing between helpfulness and harmlessness." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 310, + 255, + 566, + 525 + ], + "blocks": [ + { + "bbox": [ + 320, + 228, + 553, + 251 + ], + "lines": [ + { + "bbox": [ + 320, + 228, + 553, + 251 + ], + "spans": [ + { + "bbox": [ + 320, + 228, + 553, + 251 + ], + "type": "text", + "content": "TABLE 4: Summary of typical benchmarks with access links." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 255, + 566, + 525 + ], + "lines": [ + { + "bbox": [ + 310, + 255, + 566, + 525 + ], + "spans": [ + { + "bbox": [ + 310, + 255, + 566, + 525 + ], + "type": "table", + "html": "
BenchmarkTypeTaskMetric
AlpacaEval [324]GeneralGeneral QAWin Rate
Dolly-15k [336]GeneralGeneral QAROUGE, BERT Score
PubmedQA [339]GeneralMedical QAAccuracy
GSM8K [317]GeneralMathematicsAccuracy
HumanEval [320]GeneralCodingCode Pass Rate
AGNews [322]GeneralClassificationAccuracy
WMT14 [327]GeneralTranslationBLEU, ROUGE
CNN/DailyMail [340]GeneralSummarizationROUGE
HH-RLHF [155]SafetyGeneral QARejection Rate, Helpfulness
BeaverTails [175]SafetyGeneral QAAccuracy, Win Rate
TruthfulQA [341]SafetyGeneral QATruthfulness
PureBad [238]SafetyHarmful QAASR, Harmfulness Score
DecodingTrust [333]SafetyHarmful QAASR, Accuracy
AdvBench [260]SafetyHarmful QAASR
SALAD-Bench [316]SafetyHarmful QAASR, Safety Rate
SG-Bench [342]SafetyHarmful QAFailure Rate
SafeChain [343]SafetyHarmful QASafe@1, Safe@K
HarmBench [305]SafetyHarmful PromptASR
HEX-PHI [238]SafetyHarmful PromptASR
RealToxicPrompts [334]SafetyHarmful PromptToxicity Rate
Do-Not-Answer [335]SafetyHarmful PromptHarmfulness Score
OR-Bench [308]SafetyHarmful PromptRejection Rate
SorryBench [309]SafetyHarmful PromptFulfillment Rate
Anthropic [254]SafetyHarmful PromptASR
DirectHarm4 [281]SafetyHarmful PromptASR, Harmfulness Score
GSM-Danger [281]SafetyHarmful PromptASR
SafetyBench [315]SafetySafety EvaluationAccuracy
ToxiGen [344]SafetySafety EvaluationAccuracy
R-Judge [314]SafetySafety EvaluationAccuracy
JailbreakBench [306]SafetyJailbreakASR
StrongREJECT [345]SafetyJailbreakWillingness
WildJailbreak [346]SafetyJailbreakASR
", + "image_path": "3597ec62c579e34e23882fb0c0da7399687e89d674caf889fa87f31ca3686194.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 309, + 560, + 443, + 572 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 560, + 443, + 572 + ], + "spans": [ + { + "bbox": [ + 309, + 560, + 443, + 572 + ], + "type": "text", + "content": "4.4 Roadmap & Perspective" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 309, + 578, + 499, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 578, + 499, + 590 + ], + "spans": [ + { + "bbox": [ + 309, + 578, + 499, + 590 + ], + "type": "text", + "content": "4.4.1 From Low-Level to High-Level Safety" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 596, + 565, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 596, + 565, + 746 + ], + "spans": [ + { + "bbox": [ + 307, + 596, + 565, + 746 + ], + "type": "text", + "content": "With advancements in safety alignment technologies, LLMs are now less likely to explicitly exhibit harmful behaviors associated with low-level safety, such as violence, pornography, or discrimination [254, 265]. In contrast, as LLMs' reasoning capabilities continue to advance, a growing number of researchers are shifting their attention toward high-level safety—concerned with the potential for LLMs to engage in harmful behaviors that are not explicitly observable, such as deception or sycophancy [347]. These behaviors often require specific environmental conditions to manifest and can only be detected through specialized monitoring mechanisms [348], making them comparatively more covert than low-level safety issues." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 157 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 157 + ], + "type": "text", + "content": "4.4.1.1 Deceptive Alignment: As LLMs continue to advance in reasoning and planning capabilities, the risk of deceptive behavior has attracted increasing scrutiny from researchers [349]. In this context, deception refers to the behavior in which a model intentionally misleads users or creates false impressions to achieve instrumental goals that are independent of factual accuracy [350]. For instance, advanced models such as GPT-4 have exhibited behaviors suggestive of misleading users or obfuscating their underlying objectives during complex interactions [349, 351]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 158, + 301, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 158, + 301, + 297 + ], + "spans": [ + { + "bbox": [ + 44, + 158, + 301, + 297 + ], + "type": "text", + "content": "Deception is defined as systematically inducing others to form false beliefs in order to achieve goals beyond merely conveying the truth [350]. 
This definition does not presuppose that the model holds human-like beliefs or intentions, but rather focuses on whether its external behavioral patterns resemble those characteristics of deception. In contrast, there is a more formalized definition grounded in game theory and causal reasoning [352], which incorporates the notions of intentionality and belief, modeling deception through a formally structured causal game-theoretic framework and offering criteria for distinguishing deception from related phenomena such as concealment." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 297, + 301, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 297, + 301, + 354 + ], + "spans": [ + { + "bbox": [ + 44, + 297, + 301, + 354 + ], + "type": "text", + "content": "Evaluating the deceptive tendencies of LLMs requires a multi-layered, multi-scenario approach to comprehensively capture when and why such behavior occurs. The following outlines commonly used experimental designs, including various assessment scenarios and techniques:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 354, + 301, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 354, + 301, + 434 + ], + "spans": [ + { + "bbox": [ + 44, + 354, + 301, + 434 + ], + "type": "text", + "content": "Hypothetical Scenarios and Moral Dilemmas: Some studies design conflict scenarios pitting honesty against goal completion, analyzing model responses [353]. Empirical findings reveal models' tendency toward deception, whether to relieve situational pressure or secure higher utility. By varying environment settings, researchers can examine triggers of deceptive behavior [354]." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 435, + 301, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 435, + 301, + 573 + ], + "spans": [ + { + "bbox": [ + 44, + 435, + 301, + 573 + ], + "type": "text", + "content": "Multi-Agent Interaction and Game Experiments: The model is tested in multi-agent games or social scenarios where success depends on interactions with other agents. Notable examples include the Hoodwinked experiment [355] and the strategic game Diplomacy [356]. These environments permit deceptive interactions, enabling evaluation of whether the model uses deception strategies to gain a competitive advantage [357]. Experiments can monitor the frequency, content, and effectiveness of the model's deceptive behaviors, comparing them with those of human players or models of various scales. Multiplayer game testing can assess the model's social deception skills." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 574, + 301, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 574, + 301, + 723 + ], + "spans": [ + { + "bbox": [ + 44, + 574, + 301, + 723 + ], + "type": "text", + "content": "Autonomous Agency and Covert Action Testing: The model is provided with a defined objective and constraints, along with a certain degree of operational freedom (e.g., tool usage, code execution, or interaction interfaces), and is then observed for covert constraint violations in pursuit of its goal, particularly efforts to disguise such behavior [351, 358]. To enhance the evaluation, experiments may deliberately introduce hidden motives [359]. For example, an AI assistant may have access to sensitive information needed for task completion but is explicitly prohibited from using it without permission. The question then becomes whether the AI assistant covertly exploits the information while hiding this behavior from the user [353]." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 723, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 723, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 723, + 301, + 748 + ], + "type": "text", + "content": "Prompt Manipulation and Role Guidance: Targeted prompts or configurations can be used to elicit or sup" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 42, + 566, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 566, + 168 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 566, + 168 + ], + "type": "text", + "content": "press deceptive behavior in the model, thereby assessing its propensity and robustness. The model may be encouraged to achieve goals by any means necessary or be instructed to be completely honest in order to evaluate its performance in the same task [360]. Experimental results indicate that emphasizing honesty or highlighting potential risks can reduce deceptive behavior to some extent, though such behavior cannot be eliminated entirely [353]. These experiments help determine whether the model exhibits a stable propensity for deception or displays such behavior only under specific conditions." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 169, + 566, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 169, + 566, + 388 + ], + "spans": [ + { + "bbox": [ + 308, + 169, + 566, + 388 + ], + "type": "text", + "content": "Multi-turn Consistency and Alignment Resistance Check: Construct multi-turn dialogue scenarios to evaluate whether the model can consistently uphold a lie. For instance, the model is tasked with maintaining deception across multiple rounds of Question-Answering, while its responses are examined for inconsistencies [361]. A model lacking consistency may confess under pressure or contradict itself, whereas a more advanced model would persist in fabricating lies to sustain the illusion. 
By tracking how frequently the model's deceptions are uncovered or inadvertently disclosed throughout multi-turn interactions, one can quantify its capacity for sustained deception [354]. Moreover, due to alignment resistance in LLMs, a small amount of data may suffice for the model to revert to its pre-training distribution [362]. Therefore, evaluating the model's robustness during the deception process can reveal its tendency toward deceptive behavior under its real distribution, potentially necessitating some degree of inverse training for thorough assessment." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 388, + 566, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 388, + 566, + 562 + ], + "spans": [ + { + "bbox": [ + 308, + 388, + 566, + 562 + ], + "type": "text", + "content": "Thought Process and Internal State Monitoring: This method infers the model's intentions by analyzing its thought processes or internal activations. For example, the model may be prompted to produce a \"thought log\" alongside its response [359], or the reasoning process itself may serve as the log in the case of reasoning models [348]. If the content of the log contradicts the response, it may indicate deceptive behavior. Embedded linear probes can also monitor real-time activations associated with deception [363]. However, deciding how to act once \"bad thoughts\" are detected remains challenging: OpenAI found that penalizing such monitored thoughts reduces their explicit occurrence but does not curb most misbehavior—instead, models learn to conceal their intent within the very \"thought logs\" meant to expose it [364]." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 562, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 562, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 308, + 562, + 566, + 748 + ], + "type": "text", + "content": "4.4.1.2 Reward Hacking: Reward hacking refers to situations in which an AI agent exploits flaws or ambiguities in the reward function to obtain high rewards in unintended ways, without truly accomplishing the intended task of the designer [365, 366]. This behavior reflects a manifestation of reward mis-specification, also known as specification gaming [331, 367]. Reward hacking has long been a concern in the field of AI safety [368]. The root of this problem can be understood through Goodhart's Law: \"when a measure becomes a target, it ceases to be a good measure\" [369]. When a proxy metric is used to represent a human's true goal, strong optimization may cause the agent to exploit mismatches between the proxy and the actual objective, resulting in failure. Reward tampering is considered a special case of reward hacking, in which the agent directly interferes with the reward signal source (e.g.," + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 65 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 65 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 65 + ], + "type": "text", + "content": "by modifying the reward function) to obtain high rewards [370, 371]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 66, + 300, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 66, + 300, + 170 + ], + "spans": [ + { + "bbox": [ + 44, + 66, + 300, + 170 + ], + "type": "text", + "content": "With the widespread adoption of Reinforcement Learning from Human Feedback (RLHF) in training LLMs, reward models that rely on a single scalar value struggle to capture the complexity of human value systems [372, 373]. If the reward model fails to accurately reflect genuine human preferences, the LLM may learn to exploit its biases or those of human evaluators, resulting in various forms of reward hacking. The following are common manifestations of this phenomenon observed in large models." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 170, + 301, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 170, + 301, + 262 + ], + "spans": [ + { + "bbox": [ + 44, + 170, + 301, + 262 + ], + "type": "text", + "content": "Sycophancy: Since LLMs are optimized for human preferences, or for reward models based on such preferences, during fine-tuning, they tend to prioritize satisfying users or human supervisors to maximize rewards, rather than adhering strictly to objective correctness. 
This tendency is reflected in the way their responses often shift to align with users' implied stances, catering to their preferences [374, 375]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 263, + 300, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 263, + 300, + 380 + ], + "spans": [ + { + "bbox": [ + 44, + 263, + 300, + 380 + ], + "type": "text", + "content": "Reward Overoptimization: Model outputs may be excessively optimized for specific formal features to satisfy the reward model. For example, the model may produce unnecessarily lengthy responses [376], as human preference for detailed answers during training leads the reward model to favor longer outputs. Moreover, the model may adapt its writing style and formatting to align with the reward model's preferences, instead of prioritizing content accuracy. For instance, it may learn to respond to harmful queries with overly cautious refusals [237, 377]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 393, + 185, + 405 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 393, + 185, + 405 + ], + "spans": [ + { + "bbox": [ + 45, + 393, + 185, + 405 + ], + "type": "text", + "content": "4.4.2 Provably Safe AI System" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 409, + 300, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 409, + 300, + 537 + ], + "spans": [ + { + "bbox": [ + 44, + 409, + 300, + 537 + ], + "type": "text", + "content": "Provably safe AI systems represent an emerging paradigm that aims to ensure that advanced AI operates within rigorous, formally verifiable safety bounds. Some researchers argue that only by embedding mathematically verified safety proofs into AI architectures can we guarantee that such systems will never deviate into harmful behaviors [378]. 
This formal approach contrasts sharply with traditional empirical testing and red-teaming methods, which often fail to uncover all failure modes in complex or adversarial environments. The achievement of provable safety requires the integration of several key components [379] as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 537, + 301, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 537, + 301, + 595 + ], + "spans": [ + { + "bbox": [ + 44, + 537, + 301, + 595 + ], + "type": "text", + "content": "Formal Safety Specifications: A rigorously defined set of safety properties (e.g., \"do no harm\") must be articulated in a formal language. Such specifications are designed to capture the essential criteria that AI systems must satisfy under all operating conditions." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 44, + 596, + 301, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 596, + 301, + 653 + ], + "spans": [ + { + "bbox": [ + 44, + 596, + 301, + 653 + ], + "type": "text", + "content": "World Models: To evaluate the consequences of AI actions, it is essential to build a world model that encapsulates the dynamics and causal relationships of the environment. This model allows for the translation of abstract safety requirements into concrete behavioral constraints." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 44, + 654, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 654, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 654, + 301, + 746 + ], + "type": "text", + "content": "Verification Mechanisms: A verifier is needed to ensure that the AI system meets the safety specifications with respect to the world model, regardless of whether it is implemented as a formal proof certificate, a probabilistic bound or an asymptotic guarantee. 
Such mechanisms are the only reliable method to exclude the possibility of catastrophic failure by proving that certain harmful behaviors are mathematically impossible [378]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 42, + 564, + 124 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 564, + 124 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 564, + 124 + ], + "type": "text", + "content": "Robust Deployment Infrastructure: Beyond predeployment verification, runtime monitoring and redundant safety measures (such as provably compliant hardware) must be implemented. These safeguards ensure that if discrepancies between the world model and observed behavior occur, the system can transition to a safe state without human intervention [378, 379]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 309, + 133, + 507, + 146 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 133, + 507, + 146 + ], + "spans": [ + { + "bbox": [ + 309, + 133, + 507, + 146 + ], + "type": "text", + "content": "4.4.3 Beyond Fine-tuning, Systematic Safety" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 148, + 564, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 148, + 564, + 252 + ], + "spans": [ + { + "bbox": [ + 307, + 148, + 564, + 252 + ], + "type": "text", + "content": "AI governance encompasses the establishment and enforcement of regulatory frameworks necessary for the safe development and deployment of AI systems. Given the potential of AI to exacerbate societal biases [374, 380, 381], displace labor [382], and pose existential risks due to increasingly autonomous capabilities [15, 351], governance is critical. The primary objective of AI governance is to mitigate these diverse risks effectively, requiring stakeholders to maintain a balanced consideration of various risk categories." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 252, + 564, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 252, + 564, + 390 + ], + "spans": [ + { + "bbox": [ + 308, + 252, + 564, + 390 + ], + "type": "text", + "content": "A multi-stakeholder approach characterizes contemporary AI governance, involving governments, industry and AI laboratories, and third-party entities such as academia and non-profit organizations [383]. Governments create regulatory frameworks, conduct oversight, and establish risk management systems [384, 385], while industries and AI laboratories undertake comprehensive risk assessments throughout AI development lifecycles and voluntarily adopt security measures [386, 387]. Third parties provide critical auditing services and policy advice, fostering international cooperation and balanced stakeholder interests [388, 389, 390]." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 390, + 564, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 390, + 564, + 611 + ], + "spans": [ + { + "bbox": [ + 308, + 390, + 564, + 611 + ], + "type": "text", + "content": "Nevertheless, AI governance faces significant unresolved challenges, prominently in international and open-source contexts. International governance discussions emphasize the importance of global frameworks to manage catastrophic risks such as AI-driven arms races and inequitable distribution of AI benefits [388, 391]. Historically, international governance frameworks like the OECD AI Principles and the global ethical standards produced by the United Nations Educational, Scientific and Cultural Organization (UNESCO) offer instructive precedents [392, 393]. Conversely, open-source governance is debated regarding the balance between transparency's security benefits and potential misuse risks [394, 395]. 
Advocates argue that openness enhances security through rapid issue identification and reduces centralized control [396, 397], while critics highlight risks of malicious use and vulnerabilities from unrestricted access [260, 398]. This ongoing debate underscores the need for measured, risk-informed policies and gradual openness strategies [399, 400]." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 309, + 626, + 546, + 639 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 626, + 546, + 639 + ], + "spans": [ + { + "bbox": [ + 309, + 626, + 546, + 639 + ], + "type": "text", + "content": "5 SAFETY IN MODEL EDITING & UNLEARNING" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 642, + 564, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 642, + 564, + 723 + ], + "spans": [ + { + "bbox": [ + 307, + 642, + 564, + 723 + ], + "type": "text", + "content": "Model editing and unlearning techniques can be conceptualized as lightweight adjustments to information and efficient safeguards for privacy and security during the deployment of LLMs. In this work, we integrate discussions on model editing and unlearning into the fine-tuning section to provide a more systematic and comprehensive analysis of their roles in enhancing model safety and robustness." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 723, + 564, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 723, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 723, + 564, + 746 + ], + "type": "text", + "content": "Concretely, model editing [401, 402] and unlearning [403, 404, 405, 406, 407, 408] can be understood as methods" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 170 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 170 + ], + "type": "text", + "content": "to efficiently modify model parameters during deployment to enhance the model's security and privacy. To better reflect the comprehensiveness of our survey, we have included relevant literature on the safety of editing (Section 5.1) and unlearning (Section 5.2). It is noteworthy that there exists a certain degree of technical overlap between model editing and unlearning. To provide a clearer and more precise exposition, we focus model editing on addressing knowledge conflicts within the model, while unlearning is primarily concerned with the erasure of knowledge to ensure privacy protection." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 186, + 176, + 199 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 186, + 176, + 199 + ], + "spans": [ + { + "bbox": [ + 45, + 186, + 176, + 199 + ], + "type": "text", + "content": "5.1 Safety in Model Editing" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 202, + 300, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 202, + 300, + 295 + ], + "spans": [ + { + "bbox": [ + 44, + 202, + 300, + 295 + ], + "type": "text", + "content": "LLMs retain incorrect or outdated information [409], and for this reason, model editing has emerged to advocate updating knowledge in LLM by modifying a small part of the parameters. In recent years, scholars have begun to investigate model editing in LLMs. Generally, model editing methods can be mainly categorized into gradient-based [410, 411], memory-based [412, 413] and locate-then-edit methods [414, 415, 416]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 295, + 301, + 583 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 44, + 295, + 301, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 295, + 301, + 388 + ], + "spans": [ + { + "bbox": [ + 44, + 295, + 301, + 388 + ], + "type": "text", + "content": "Gradient. Early approaches [410, 411, 417] advocate that the updating of knowledge in the LLMs is accomplished by modifying the gradient of the LLM. A more recent study [418] revisits gradient-based fine-tuning and demonstrates strong performance through constrained optimization techniques. However, since gradient-based methods are too complex and suffer from pattern collapse, it is gradually being replaced by other research lines [419, 420]." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 388, + 301, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 388, + 301, + 456 + ], + "spans": [ + { + "bbox": [ + 44, + 388, + 301, + 456 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 44, + 388, + 301, + 456 + ], + "type": "text", + "content": " Memory. Memory-based methods [412, 413] advocate the introduction of external parameters to assist in updating knowledge. Though effective, models with excessive parameters face the problem of over-parameterization – where the parameter space becomes significantly larger than necessary to capture the underlying data distribution [420, 421]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 458, + 301, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 458, + 301, + 583 + ], + "spans": [ + { + "bbox": [ + 44, + 458, + 301, + 583 + ], + "type": "text", + "content": "- Locate-then-edit. Locate-then-edit methods, represented by RoME [416], MEMIT [421] and AlphaEdit [402], localizing knowledge storage-related neurons by causal tracing, achieving knowledge editing by modifying these neurons, have made breakthroughs in recent years [422, 423, 424]. The locate-then-edit approach has been proven to be effective in updating specific factual knowledge in the LLM [402]. Thus it is widely used to edit the security of LLMs [425, 426]. In the following part, we will focus on the application of the locate-then-edit approach to the security domain." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 44, + 584, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 584, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 584, + 301, + 748 + ], + "type": "text", + "content": "Attack. Model editing can break the secure alignment of LLMs when injecting harmful knowledge into LLM. 
Chen et.al [425] first proposed the concept of editing attack, constructing a dataset named EDITATTACK, and using editing methods such as RoME [416] and IKE [427] successfully injected harmful, incorrect, and bias information to LLMs. Since model editing modifies the corresponding knowledge in the form of knowledge triples, BadEdit [428] proposes a way to inject triggers using model editing. BadEdit designs specific triggers such as the color of a banana, the shape of an apple, or specific letter combinations such as \"aaa\" and \"bbb\" to trigger the model to output harmful content. Building on this basis, Concept-RoT [429] designs a more invisible approach by proposing " + }, + { + "bbox": [ + 44, + 584, + 301, + 748 + ], + "type": "inline_equation", + "content": "k_{0}" + }, + { + "bbox": [ + 44, + 584, + 301, + 748 + ], + "type": "text", + "content": " based on the concept" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 42, + 564, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 564, + 121 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 564, + 121 + ], + "type": "text", + "content": "of context, and implanting a backdoor against the concept of context by editing the value corresponding to " + }, + { + "bbox": [ + 307, + 42, + 564, + 121 + ], + "type": "inline_equation", + "content": "k_{0}" + }, + { + "bbox": [ + 307, + 42, + 564, + 121 + ], + "type": "text", + "content": ", thus realizing the effect of the conceptual Trojan horse. In addition, DEPN [430] devised a method to first locate private neurons, and secondly edit the specified private neurons through RoME so that the model outputs sensitive private information." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 123, + 565, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 123, + 565, + 205 + ], + "spans": [ + { + "bbox": [ + 308, + 123, + 565, + 205 + ], + "type": "text", + "content": "Defense. 
Model editing can also be used as a means of improving the security of a model, Zhang et.al [426] proposed a model editing method named DINM, to localize and detoxify toxic neurons via model editing, making the model less susceptible to jailbreaking. In addition, other studies [422, 431, 432] have explored the use of model editing for blue teams. Model editing methods have made big strides" + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 310, + 226, + 565, + 308 + ], + "blocks": [ + { + "bbox": [ + 333, + 211, + 539, + 223 + ], + "lines": [ + { + "bbox": [ + 333, + 211, + 539, + 223 + ], + "spans": [ + { + "bbox": [ + 333, + 211, + 539, + 223 + ], + "type": "text", + "content": "TABLE 5: Model Editing for attack and defense." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 226, + 565, + 308 + ], + "lines": [ + { + "bbox": [ + 310, + 226, + 565, + 308 + ], + "spans": [ + { + "bbox": [ + 310, + 226, + 565, + 308 + ], + "type": "table", + "html": "
MethodsAttack?BackDoor?Defense?Parameter?
RoME[416]
IKE[427]--X
AlphaEdit[402]
BadEdit[428]X
ConceptROT[429]X
DEPN[430]XX
DINM[426]XX
PEM[432]XX
", + "image_path": "3b53e9483d3d0da9577270c0536f43d5dba6fe92ad9bba6ab4c0fd3c60e6cc4b.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 316, + 564, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 316, + 564, + 398 + ], + "spans": [ + { + "bbox": [ + 307, + 316, + 564, + 398 + ], + "type": "text", + "content": "in red team, making them an effective means of injecting risk content into safely aligned models. We summarize the mainstream editing for attacks and defenses in Table 5 and each row in the table represents distinct included content.. Against model editing attacks, no research has been done to make a specific defense against such attacks, so further exploration in this area is an important research topic." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 309, + 408, + 427, + 420 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 408, + 427, + 420 + ], + "spans": [ + { + "bbox": [ + 309, + 408, + 427, + 420 + ], + "type": "text", + "content": "5.2 Safety in Unlearning" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 423, + 565, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 423, + 565, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 423, + 565, + 712 + ], + "type": "text", + "content": "LLMs have demonstrated remarkable capabilities in various tasks, but their training on vast and often unfiltered datasets from the Internet inevitably leads to the absorption of unsafe information [433, 434, 435, 436, 437, 438]. This includes biases [439], stereotypes [440], toxic language [441], misinformation [442, 443, 444], and even private data [71]. Therefore, LLM unlearning is crucial for ensuring their safe and responsible deployment [406, 445], as shown in Figure 6. 
Unlearning, in this context, refers to the process of selectively removing or mitigating the influence of specific knowledge, behaviors, or data points from a trained LLM [446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456]. Unlearning methods can be distinguished into two broad paradigms [457]: exact (certified) unlearning and heuristic (approximate) unlearning. Exact methods accurately identify poisoned data points or affected parameters, providing formal or statistical guarantees that the specified behaviors no longer influence the model. This typically requires certified retraining from scratch, removing the disallowed data entirely [458]. Two primary paradigms have emerged to achieve approximate unlearning: parameter-adjusting methods, which modify the model's internal weights, and parameter-preserving methods, which intervene externally without altering the core model architecture (refer to Figure 6)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 712, + 564, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 712, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 712, + 564, + 746 + ], + "type": "text", + "content": "Parameter-Adjusting Unlearning. The first paradigm, which involves adjusting the model's parameters, is characterized by its direct intervention in the model's internal" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 388 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 388 + ], + "type": "text", + "content": "structure. This approach typically requires retraining or fine-tuning the model on a curated dataset, designed to counteract the unsafe knowledge or behavior that needs to be unlearned. It also encompasses methods that follow a locate-then-edit pipeline, where specific parameters associated with the target knowledge are identified and directly modified to achieve unlearning [456]. Techniques such as Gradient Ascent [459] and its variations [460] are commonly employed. While traditional fine-tuning using cross-entropy loss is prevalent, more specialized loss functions have been proposed to enhance the control over the outputs of unlearned models, such as KL minimization [461, 462, 463] and the IDK loss function [464]. Additionally, recent work [465] has reframed LLM unlearning as a preference optimization problem [466], utilizing Negative Preference Optimization loss to improve the unlearning process. In contrast to these training-intensive approaches, LaW [456] draws inspiration from model editing by identifying and removing knowledge associations embedded in MLP weights, aiming to eliminate targeted information with minimal impact on the model's overall capabilities. 
Given the recent powerful multimodal perception and generation nature of LLMs, MMUnlearner [467] proposes to reformulate the setting of multimodal unlearning, which aims at erasing the unwanted visual concept but still preserving textual knowledge. Based on existing multimodal LLM-based unlearning benchmarks [468, 469, 470], SafeEraser [471] further incorporates unlearning mechanism and evaluation into multimodal LLM safety, via introducing Prompt Decouple Loss and a new metric called Safe Answer Refusal Rate." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 48, + 400, + 299, + 586 + ], + "blocks": [ + { + "bbox": [ + 48, + 400, + 299, + 586 + ], + "lines": [ + { + "bbox": [ + 48, + 400, + 299, + 586 + ], + "spans": [ + { + "bbox": [ + 48, + 400, + 299, + 586 + ], + "type": "image", + "image_path": "7d17dc024ae8d367e87d8dd062de9a7bf8de5670470bc7baaeafe76bbb049324.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 52, + 592, + 294, + 617 + ], + "lines": [ + { + "bbox": [ + 52, + 592, + 294, + 617 + ], + "spans": [ + { + "bbox": [ + 52, + 592, + 294, + 617 + ], + "type": "text", + "content": "Fig. 6: The taxonomy illustration of LLM Unlearning for safety." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 631, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 631, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 631, + 301, + 748 + ], + "type": "text", + "content": "Parameter-Preserving Unlearning. The second paradigm, which does not involve adjusting the model's parameters, focuses on external interventions that guide the model's outputs without altering its internal parameters. Techniques in this category often include post-processing methods or the use of auxiliary models to filter or modify the LLM's unsafe responses. 
Editing-based techniques [430, 472, 473, 474] modify specific components of the model architecture or introduce additional modules to counteract unwanted knowledge. Task vector approaches [475, 476]" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 307, + 42, + 566, + 110 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 566, + 110 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 566, + 110 + ], + "type": "text", + "content": "leverage the geometric properties of the parameter space to identify and neutralize directions associated with targeted information. More recently, in-context learning strategies [477, 478] have emerged, which guide the LLM's behavior through carefully crafted prompts rather than weight modifications." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 308, + 112, + 566, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 112, + 566, + 159 + ], + "spans": [ + { + "bbox": [ + 308, + 112, + 566, + 159 + ], + "type": "text", + "content": "Although heuristic methods are far more scalable, their guarantees are only empirical. Closing this gap between certified safety and practical feasibility remains a central research challenge for the field." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 309, + 178, + 444, + 190 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 178, + 444, + 190 + ], + "spans": [ + { + "bbox": [ + 309, + 178, + 444, + 190 + ], + "type": "text", + "content": "5.3 Roadmap & Perspective" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 195, + 402, + 207 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 195, + 402, + 207 + ], + "spans": [ + { + "bbox": [ + 309, + 195, + 402, + 207 + ], + "type": "text", + "content": "5.3.1 Model Editing" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 211, + 565, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 211, + 565, + 350 + ], + "spans": [ + { + "bbox": [ + 307, + 211, + 565, + 350 + ], + "type": "text", + "content": "The evolution of model editing traces back to localized factual updates (e.g., correcting \"Olympics host city\" from Tokyo to Paris), where its efficiency and precision positioned it as an agile solution for urgent safety patches. Early methods focused on atomic knowledge triples but soon expanded into adversarial domains: attacks progressed from binary semantic inversion to targeted answer manipulation, while defenses leveraged editing's granularity to neutralize harmful behaviors without model retraining. Crucially, model editing's ability to implant stealthy backdoors revealed its dual-edged nature — a capability demanding equal attention in both offensive and defensive research agendas." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 350, + 565, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 350, + 565, + 523 + ], + "spans": [ + { + "bbox": [ + 307, + 350, + 565, + 523 + ], + "type": "text", + "content": "In the era of sophisticated safety alignment, model editing addresses a critical niche. 
While safety fine-tuning establishes systematic safeguards through periodic retraining, it struggles with emergent, context-sensitive risks (e.g., geopolitical shifts or cultural updates) that evolve faster than retraining cycles. As LLMs scale, the intervals between alignment updates widen, creating safety gaps exacerbated by catastrophic forgetting risks. Model editing bridges these gaps through rapid surgical interventions — executing updates orders of magnitude faster than alignment procedures — by modifying specific unsafe knowledge or concepts, all while preserving general model stability. In summary, while safety fine-tuning remains essential for systematic alignment, model editing addresses four fundamental limitations in the current era:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 309, + 526, + 564, + 747 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 309, + 526, + 564, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 526, + 564, + 550 + ], + "spans": [ + { + "bbox": [ + 309, + 526, + 564, + 550 + ], + "type": "text", + "content": "- Temporal Agility: Mitigates emergent, unpredictable safety risks that cannot wait for full retraining cycles." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 550, + 564, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 550, + 564, + 597 + ], + "spans": [ + { + "bbox": [ + 309, + 550, + 564, + 597 + ], + "type": "text", + "content": "- Granular Control: Enables surgical modifications to specific reasoning pathways in large reasoning models (LRMs), correcting flawed chain-of-thought logic without disrupting valid inference patterns." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 309, + 597, + 564, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 597, + 564, + 632 + ], + "spans": [ + { + "bbox": [ + 309, + 597, + 564, + 632 + ], + "type": "text", + "content": "- Resource Decoupling: Reduces computational barriers for safety-critical updates, particularly in multimodal settings where traditional retraining costs scale prohibitively." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 309, + 631, + 564, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 631, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 309, + 631, + 564, + 747 + ], + "type": "text", + "content": "- Stable editing: Model editing is an ongoing and iterative process; however, excessive modifications can compromise the model's performance, likely due to the intricate interdependencies among neurons. Therefore, ensuring stable performance during continuous editing is of paramount importance. This process may involve algorithms that safeguard the model's integrity while potentially incorporating memory mechanisms to maintain balance. In summary, altering the original model parameters is a relatively \"risky\" endeavor, and plug-and-play externals" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 42, + 299, + 64 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 42, + 299, + 64 + ], + "spans": [ + { + "bbox": [ + 53, + 42, + 299, + 64 + ], + "type": "text", + "content": "nal modules may emerge as the predominant approach in the future." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 68, + 299, + 92 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 68, + 299, + 92 + ], + "spans": [ + { + "bbox": [ + 44, + 68, + 299, + 92 + ], + "type": "text", + "content": "Future frontiers highlight model editing's unique value proposition. Specifically," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 93, + 299, + 277 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 45, + 93, + 299, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 93, + 299, + 138 + ], + "spans": [ + { + "bbox": [ + 45, + 93, + 299, + 138 + ], + "type": "text", + "content": "- More Hidden Backdoor: By precisely modifying targeted parameters without perturbing unrelated knowledge, edited backdoors evade traditional detection methods that monitor broader model behavior." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 140, + 299, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 140, + 299, + 185 + ], + "spans": [ + { + "bbox": [ + 45, + 140, + 299, + 185 + ], + "type": "text", + "content": "- Multimodal Safety: In multimodal systems, editing reduces the computational burden of aligning heterogeneous data streams by selectively modifying cross-modal attention mechanisms." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 186, + 299, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 186, + 299, + 232 + ], + "spans": [ + { + "bbox": [ + 45, + 186, + 299, + 232 + ], + "type": "text", + "content": "- Concept-Level Safety: Directly edit abstract safety concepts (e.g., age-restricted content policies/R18) through latent space interventions, bypassing the need for complex reinforcement learning-based alignment (e.g., DPO)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 232, + 299, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 232, + 299, + 277 + ], + "spans": [ + { + "bbox": [ + 45, + 232, + 299, + 277 + ], + "type": "text", + "content": "- Interpretability-driven Safety: The model editing's interpretability dimension further provides causal insights into safety-critical model behaviors, informing robust verification frameworks." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 44, + 280, + 300, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 280, + 300, + 373 + ], + "spans": [ + { + "bbox": [ + 44, + 280, + 300, + 373 + ], + "type": "text", + "content": "Critically, model editing complements - rather than replaces - systematic alignment, forming a hybrid governance paradigm: systematic alignment ensures broad ethical guardrails, while model editing enables surgical adaptations to emerging threats, i.e., establishing a closed-loop governance system for sustainable safe deployment. Together, they will form the twin pillars of LLM safety in the future." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 385, + 126, + 397 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 385, + 126, + 397 + ], + "spans": [ + { + "bbox": [ + 45, + 385, + 126, + 397 + ], + "type": "text", + "content": "5.3.2 Unlearning" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 44, + 399, + 300, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 399, + 300, + 526 + ], + "spans": [ + { + "bbox": [ + 44, + 399, + 300, + 526 + ], + "type": "text", + "content": "The concept of machine unlearning has evolved from a specialized issue in traditional machine learning to a key aspect of responsible AI governance for LLMs. Early efforts in unlearning primarily focused on removing data from smaller, more specialized models, often in response to privacy regulations such as the GDPR's \"right to be forgotten\" [446]. However, with the advent of LLMs—trained on vast, diverse, and often uncontrolled datasets—the landscape of machine unlearning has undergone significant transformation. This shift has introduced new challenges and imperatives that were previously unforeseen." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 44, + 527, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 527, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 527, + 301, + 748 + ], + "type": "text", + "content": "The initial phase of LLM unlearning focused on adapting existing techniques—primarily parameter-adjusting methods like gradient ascent [459] and fine-tuning variants [461, 462, 463, 464, 479]—to the scale and complexity of LLMs. While this phase demonstrated the feasibility of unlearning, it also highlighted several fundamental limitations, such as computational cost [445, 449], catastrophic forgetting [451], and lack of granularity [406]. These limitations have driven the development of more refined approaches, such as parameter-preserving methods [472, 475, 476, 477, 478]. These methods, which utilize techniques like task arithmetic and in-context learning, provide a glimpse of a future where unlearning can be achieved with greater efficiency and precision. The shift to multimodal LLMs has further expanded the scope, necessitating unlearning methods that can address the safety concerns arising from the interaction between different modalities [467, 468, 469, 470, 471]. The current landscape of LLM unlearning can be described as a shift from reactive “data deletion” to proactive “knowledge" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 42, + 563, + 78 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 563, + 78 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 563, + 78 + ], + "type": "text", + "content": "sculpting.\" We are moving beyond merely removing information to precisely shaping the model's understanding and behavior. 
This shift is driven by several key insights:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 309, + 83, + 564, + 244 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 309, + 83, + 564, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 83, + 564, + 140 + ], + "spans": [ + { + "bbox": [ + 309, + 83, + 564, + 140 + ], + "type": "text", + "content": "- Unlearning as Preference Optimization: By framing unlearning as preference learning, we can align the model's output with desired safety and ethical guidelines, utilizing techniques like Negative Preference Optimization [465, 466] or safety-oriented preference optimization [480]." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 309, + 141, + 564, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 141, + 564, + 198 + ], + "spans": [ + { + "bbox": [ + 309, + 141, + 564, + 198 + ], + "type": "text", + "content": "- The Importance of Context: Since the \"unsafety\" of information is often context-dependent, researchers are developing methods to selectively unlearn behaviors in specific situations while maintaining the model's general capabilities [477, 481, 482, 483]." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 309, + 198, + 564, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 198, + 564, + 244 + ], + "spans": [ + { + "bbox": [ + 309, + 198, + 564, + 244 + ], + "type": "text", + "content": "- Multimodal Unlearning: Addressing the fusion of modalities (text, images, audio) presents new challenges in removing unwanted concepts and behaviors both within and across modalities [467, 471, 484]." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 309, + 250, + 564, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 250, + 564, + 273 + ], + "spans": [ + { + "bbox": [ + 309, + 250, + 564, + 273 + ], + "type": "text", + "content": "Looking ahead, several critical areas are essential for further advancement in the field:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 309, + 278, + 564, + 660 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 309, + 278, + 564, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 278, + 564, + 336 + ], + "spans": [ + { + "bbox": [ + 309, + 278, + 564, + 336 + ], + "type": "text", + "content": "- Principled Evaluation Metrics: Robust, standardized benchmarks are necessary to accurately assess unlearning effectiveness and potential side effects. These metrics should move beyond simplistic, easily manipulated measures [450, 476, 485, 486, 487]." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 309, + 337, + 564, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 337, + 564, + 394 + ], + "spans": [ + { + "bbox": [ + 309, + 337, + 564, + 394 + ], + "type": "text", + "content": "- Theoretical Foundations: A deeper understanding of the mechanisms behind unlearning in LLMs is needed to develop truly reliable techniques [451, 488]. This includes exploring why unlearning is challenging and how different methods affect internal representations." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 309, + 395, + 564, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 395, + 564, + 464 + ], + "spans": [ + { + "bbox": [ + 309, + 395, + 564, + 464 + ], + "type": "text", + "content": "- Hybrid Approaches: Combining parameter-adjusting methods (for coarse-grained removal) with parameter-preserving techniques (for fine-grained refinement) presents a promising path forward. This aligns with the \"hybrid governance paradigm\" from Model Editing, allowing for both broad and precise interventions." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 309, + 464, + 564, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 464, + 564, + 555 + ], + "spans": [ + { + "bbox": [ + 309, + 464, + 564, + 555 + ], + "type": "text", + "content": "- Unlearning for Interpretability: Instead of using interpretability solely to guide unlearning, the unlearning process itself can be used to enhance our understanding of model behavior [489]. By selectively removing knowledge and observing the consequences, we gain causal insights into the model's reasoning. This represents a fundamentally different and more powerful use of unlearning—this is the key \"dry goods\" insight." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 309, + 555, + 564, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 555, + 564, + 660 + ], + "spans": [ + { + "bbox": [ + 309, + 555, + 564, + 660 + ], + "type": "text", + "content": "- Unlearning Benchmark: Building upon the aforementioned insight, it is evident that unlearning currently lacks a standardized benchmark. Establishing a method to effectively balance a model's ability to forget while systematically ensuring its performance remains reliable is crucial (Figure 7). 
In the realm of multimodal learning, creating such a benchmark could be even more complex, potentially representing a pivotal step in advancing this field [471, 490, 491, 492, 493]." + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 308, + 665, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 665, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 308, + 665, + 564, + 748 + ], + "type": "text", + "content": "In conclusion, LLM unlearning is not merely a technical challenge; it is a fundamental requirement for building trustworthy and beneficial AI systems or even agent ecosystems [494, 495]. It is evolving from a reactive measure to a proactive design principle, shaping the very foundations of how LLMs learn, adapt, and interact with the world. The journey from \"forgetting\" to \"knowledge sculpting\"" + } + ] + } + ], + "index": 25 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 45, + 299, + 228 + ], + "blocks": [ + { + "bbox": [ + 52, + 45, + 299, + 228 + ], + "lines": [ + { + "bbox": [ + 52, + 45, + 299, + 228 + ], + "spans": [ + { + "bbox": [ + 52, + 45, + 299, + 228 + ], + "type": "image", + "image_path": "3a418fa605c423149f828c2f001b5edc46cfe6b96a344e91855296aab87fa433.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 236, + 301, + 281 + ], + "lines": [ + { + "bbox": [ + 45, + 236, + 301, + 281 + ], + "spans": [ + { + "bbox": [ + 45, + 236, + 301, + 281 + ], + "type": "text", + "content": "Fig. 7: We define the goal of unlearning as maximizing both model utility and forget quality, meaning that algorithms positioned closer to the top-right corner are considered more reliable." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 303, + 301, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 303, + 301, + 327 + ], + "spans": [ + { + "bbox": [ + 44, + 303, + 301, + 327 + ], + "type": "text", + "content": "is underway, promising a future where LLMs can be both powerful and aligned with human values [496, 497, 498]." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 342, + 251, + 355 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 342, + 251, + 355 + ], + "spans": [ + { + "bbox": [ + 45, + 342, + 251, + 355 + ], + "type": "text", + "content": "6 LLM(-AGENT) DEPLOYMENT SAFETY" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 357, + 301, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 357, + 301, + 498 + ], + "spans": [ + { + "bbox": [ + 44, + 357, + 301, + 498 + ], + "type": "text", + "content": "In this section, we focus on the safety of LLM and LLM-agent during the deployment phase, addressing three progressively broader dimensions: LLM Safety (Section 6.1), Single-agent Safety (Section 6.2), and Multi-agent Safety (Section 6.3). We begin by discussing the potential threats and defense mechanisms associated with the foundational LLM during inference. Subsequently, we explore the additional security risks introduced by supplementary modules, which impact both individual agents and multi-agent systems. This structured approach ensures a comprehensive understanding of safety challenges at varying scales of LLM(-agent) deployment." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 512, + 157, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 512, + 157, + 525 + ], + "spans": [ + { + "bbox": [ + 45, + 512, + 157, + 525 + ], + "type": "text", + "content": "6.1 Deployment Safety" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 526, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 526, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 526, + 301, + 746 + ], + "type": "text", + "content": "The deployment of a single LLM introduces significant security challenges, including adversarial attacks, data privacy risks, and content integrity concerns. 
This subsection systematically examines these issues by first analyzing key attack vectors (Subsection 6.1.1), such as model extraction, membership inference, jailbreak attacks, prompt injection, data extraction, and prompt stealing, which threaten model confidentiality, robustness, and ethical compliance. Next, we explore defensive mechanisms (Subsection 6.1.2), including input preprocessing, output filtering, robust prompt engineering, and system-level security controls aimed at mitigating these threats. Finally, we discuss evaluation and benchmarking (Subsection 6.1.3), covering robustness, content safety, privacy leakage, multi-modal safety, and standardized security benchmarks, ensuring a comprehensive assessment of LLM deployment safety. This structure follows a logical progression from identifying threats to implementing defenses and establishing reliable evaluation methodologies." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 42, + 434, + 54 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 42, + 434, + 54 + ], + "spans": [ + { + "bbox": [ + 309, + 42, + 434, + 54 + ], + "type": "text", + "content": "6.1.1 Attack in Deployment" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 309, + 59, + 529, + 71 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 59, + 529, + 71 + ], + "spans": [ + { + "bbox": [ + 309, + 59, + 529, + 71 + ], + "type": "text", + "content": "We first give an overview of the attacks in Figure 8." 
+ } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 312, + 85, + 563, + 213 + ], + "blocks": [ + { + "bbox": [ + 312, + 85, + 563, + 213 + ], + "lines": [ + { + "bbox": [ + 312, + 85, + 563, + 213 + ], + "spans": [ + { + "bbox": [ + 312, + 85, + 563, + 213 + ], + "type": "image", + "image_path": "3c01c7964a0ec82961f085a41736bb2f4f02d485345f84dac051957673a31670.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 220, + 564, + 243 + ], + "lines": [ + { + "bbox": [ + 309, + 220, + 564, + 243 + ], + "spans": [ + { + "bbox": [ + 309, + 220, + 564, + 243 + ], + "type": "text", + "content": "Fig. 8: The overview of attacks in single LLM's deployment phase." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 256, + 565, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 256, + 565, + 453 + ], + "spans": [ + { + "bbox": [ + 307, + 256, + 565, + 453 + ], + "type": "text", + "content": "Model Extraction Attacks. Model extraction attacks aim to steal a deployed language model, which only provides an Application Programming Interface (API) that processes text input (i.e., a prompt) and returns generated outputs. He et al. and Peng et al. [499, 500, 501, 502] made a series of early efforts in launching model extraction or stealing attacks against LLMs (even deployed as a service) and proposed various defense mechanisms to mitigate such risks. Carlini et al. [503] conducted the model-stealing attack against a black-box large language model by targeting its embedding projection layer. Building on this, Finlayson et al. [504] further investigated the risk of stealing embedding dimensions by exploiting the softmax bottleneck. Another line of research explores model extraction in a gray-box setting. For instance, Zanella et al. 
[505] demonstrated the feasibility of stealing high-fidelity language models when given access to a frozen or fine-tuned encoder." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 453, + 564, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 453, + 564, + 581 + ], + "spans": [ + { + "bbox": [ + 307, + 453, + 564, + 581 + ], + "type": "text", + "content": "Another category of model extraction attacks focuses on recovering the full weight of an LLM. For instance, Horwitz et al. [506] successfully reconstruct a pre-fine-tuned LLM (i.e., the pre-trained model before fine-tuning) using its fine-tuned variants, such as low-rank adaptation (LoRA) models. Beyond general model-stealing attacks, some research explores threats to specialized capabilities. Li et al. [507] extract the coding abilities of an LLM, including code synthesis and translation. Additionally, Liu et al. [508] propose a theoretically grounded method for stealing any low-rank language model." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 581, + 564, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 581, + 564, + 616 + ], + "spans": [ + { + "bbox": [ + 308, + 581, + 564, + 616 + ], + "type": "text", + "content": "Membership Inference Attacks. Membership Inference Attack (MIA) tries to figure out whether a given candidate is included in the training dataset of an LLM [117, 509]." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 309, + 619, + 565, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 619, + 565, + 748 + ], + "spans": [ + { + "bbox": [ + 309, + 619, + 565, + 748 + ], + "type": "text", + "content": "Methods. [509] propose the first MIA with MIN-K% PROB, which identifies examples that contain few outlier words with low probabilities as non-members. Afterward, [510] propose MIN-K%++, which simulates the membership inference into identifying local maxima. 
Some works reveal that the success of MIAs against LLMs may be due to sampling non-members from different distributions. Thus, [511] propose Blind attack, which conducts MIA by applying a threshold and completely ignores the target model. [512] selectively combine the existing MIAs and aggregate their scores to perform a statistical test. [513]" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 42, + 301, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 42, + 301, + 157 + ], + "spans": [ + { + "bbox": [ + 52, + 42, + 301, + 157 + ], + "type": "text", + "content": "identify the membership of a verbatim text by constructing paraphrased options (with another proxy model) and asking the target LLM for true verbatim. [514] examine the relative change in conditional log-likelihoods when prefixing target data points with non-member context. [515] propose to generate noisy neighbors for a target sample by adding stochastic noise in the embedding space. [516] train a neural network to capture variations in output probability distributions between members and non-members." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 41, + 158, + 302, + 643 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 41, + 158, + 301, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 158, + 301, + 274 + ], + "spans": [ + { + "bbox": [ + 41, + 158, + 301, + 274 + ], + "type": "inline_equation", + "content": "\\nRightarrow" + }, + { + "bbox": [ + 41, + 158, + 301, + 274 + ], + "type": "text", + "content": " Document-level MIAs. Some works focus on document-level MIAs. Meeus et al. [517] propose the first MIA for document-level leakage, which contains four steps: retrieving, normalizing, aggregating, and predicting. After that, Meeus et al. [518] validate that it doesn't work against models that do not naturally memorize and propose to utilize copyright traps to detect the use of copyrighted materials. Puerto et al. [519] make exploration toward collection-level MIA against LLMs by computing features and two-stage aggregation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 41, + 274, + 302, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 274, + 302, + 528 + ], + "spans": [ + { + "bbox": [ + 41, + 274, + 302, + 528 + ], + "type": "text", + "content": "Different Settings. Some works also explore the MIA risk in novel settings. Anderson et al. [520] propose the first MIA against Retrieval Augmented Generation (RAG) systems by directly asking whether one candidate is its member or not. Li et al. [521] compare the output semantic similarity of the sample for the RAG system and the remaining to determine the membership of RAG's database. Zhang et al. [522] propose the first MIA against in-context learning and four attack methods, including GAP, Inquiry, Repeat, and Brainwash. Meanwhile, Duan et al. [523] reveal that MIA risk in in-context learning is more severe than in the fine-tuning setting. Wen et al. 
[524] conduct membership inference of fine-tuning data by poisoning pretraining data and backdoorsing the pre-trained model. Then Wen et al. [525] comprehensively assess the MIA risk against adaptation methods, including LowRank Adaptation (LoRA), Soft Prompt Tuning (SPT), and In-Context Learning (ICL). Balloccu et al. [526] study the indirect data contamination for closed-source LLMs, which can also be regarded as MIA. Fu et al. [527] propose Self-calibrated Probabilistic Variation, which fine-tunes the reference model by prompting the target LLM." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 41, + 527, + 302, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 527, + 302, + 643 + ], + "spans": [ + { + "bbox": [ + 41, + 527, + 302, + 643 + ], + "type": "inline_equation", + "content": "\\nRightarrow" + }, + { + "bbox": [ + 41, + 527, + 302, + 643 + ], + "type": "text", + "content": " Factor Impact. Duan et al. [117] find that the existing MIAs work poorly on LLM due to massive training data and near-one epoch training. Li et al. [528] clarify the impact of fine-tuning and evaluation metrics and propose a three-phase framework (i.e. training, simulation, and confidence calculation) to assess membership leakage. Kandpal et al. [87] find that duplication of training data highly extends the risk of MIA. Naseh et al. [529] validate that using synthetic data in membership evaluations may lead to false classification results." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 44, + 654, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 654, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 654, + 301, + 748 + ], + "type": "text", + "content": "Jailbreak Attacks. Jailbreak attacks aim to induce the large language model to generate unsafe content like violence [260]. 
Jailbreak attacks focus on bypassing the safety rules, including system safety prompts and safety filters, while prompt injection attacks target all system prompts. Lots of literature have studied the vulnerability of LLM, where different terms, including \"jailbreak attack\" and \"redteaming\", all point to the same safety vulnerability that" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 564, + 65 + ], + "type": "text", + "content": "generates unsafe content. We classify them into two main categories, i.e. optimization-based and strategy-based." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 65, + 564, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 65, + 564, + 203 + ], + "spans": [ + { + "bbox": [ + 307, + 65, + 564, + 203 + ], + "type": "text", + "content": "Strategy-based jailbreaks figure out novel strategies or templates to generate one adversarial prompt at a heat to test LLMs' vulnerabilities, which are pre-defined. Thus, the generated prompt is non-evolvable. Specifically, useful strategies include persuasion [559], role-playing [560, 561, 562, 563], cipher [564, 565], ASCII [566], long-context [567], low-resource language [568, 569], in-context malicious demonstration [570], overloaded logical thinking [571], misspelling [572], multi-language mixture [573], rephrasing [538, 574, 575, 576], competing objectives and generalization mismatch [577], [wenjie: splitting sub-queries [578]], zero-shot generation [579], personal modulation [580]." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 203, + 564, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 203, + 564, + 249 + ], + "spans": [ + { + "bbox": [ + 307, + 203, + 564, + 249 + ], + "type": "text", + "content": "Optimization-based jailbreaks contain a multi-step optimization process to revise one unsafe prompt. Here, we further divide the optimization-based jailbreaks into gradient-based and LLM-based ones:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 250, + 565, + 748 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 307, + 250, + 565, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 250, + 565, + 538 + ], + "spans": [ + { + "bbox": [ + 307, + 250, + 565, + 538 + ], + "type": "text", + "content": "Gradient-based Optimization. GCG [260] appends one suffix to the target prompt, then utilizes the gradient of loss, which is calculated with the target (e.g., \"Sure\" or \"Yes\") and output, to optimize the soft prompt. Then, it greedily searches the best-matched tokens in the dictionary for soft prompt replacement. AutoDAN-B [535] solves the limited readability of GCG by constructing a proxy score where the perplexity is considered, which is utilized for greedy sampling. I-GCG [531] improves GCG by appending a template before the suffix and uses a multi-coordinate updating strategy and easy-to-hard initialization to optimize the suffix. COLD-Attack [581] adapts Energy-based Constrained Decoding with Langevin Dynamics for controllable adversarial prompt generation. MA-GCG [532] proposes momentum gradient to boost and stabilize the greedy search for tokens in adversarial prompts. A-GCG [533] introduces a smaller draft model than the target model to sample the promising suffix candidates for faster optimization. BOOST [582] enhances the existing jailbreak attacks by adding eos tokens to the end of the unsafe prompt. 
CRT [583] proposes an enhanced reinforcement learning-based jailbreak with consideration of prompt diversity. I-FSJ [584] deploys few-shot learning and demo-level random search." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 538, + 565, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 538, + 565, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 538, + 565, + 748 + ], + "type": "inline_equation", + "content": "\\Rightarrow" + }, + { + "bbox": [ + 307, + 538, + 565, + 748 + ], + "type": "text", + "content": " LLM-based Optimization. PAIR [261] constructs a system prompt and uses an attacker LLM to generate and revise adversarial prompts. It also uses a Judge model to assess the feedback from the victim, which is further utilized for revising the adversarial prompt. AutoDAN-A [534] utilizes crossover strategies and LLM-based mutation to revise adversarial prompts into stealthy sentences. AntoDAN-Trubo [539] AutoDAN-Turbo proposes to find useful strategies by prompting an LLM automatically. ToA (Tree of Attack) [536] iteratively uses an LLM to transform the unsafe prompt into two variations and keeps the prompt variation that achieves a higher score. Xiao et al. [585] adopt a similar pipeline with PAIR [261] and introduce malicious content concealing and memory reframing. Puzzler [586] proposes defensive and offensive measures to conduct an indirect jailbreak. GPT-FUZZER [587] starts from human-written prompts, and uses templates and mutation to rewrite unsafe prompts." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 100, + 564, + 715 + ], + "blocks": [ + { + "bbox": [ + 45, + 70, + 564, + 95 + ], + "lines": [ + { + "bbox": [ + 45, + 70, + 564, + 95 + ], + "spans": [ + { + "bbox": [ + 45, + 70, + 564, + 95 + ], + "type": "text", + "content": "TABLE 6: A summary of attacks for LLM after deployment. Our evaluation includes representative studies that exemplify these security aspects. More details can be found in the main text. OS indicates whether the code is open-sourced." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 100, + 564, + 715 + ], + "lines": [ + { + "bbox": [ + 47, + 100, + 564, + 715 + ], + "spans": [ + { + "bbox": [ + 47, + 100, + 564, + 715 + ], + "type": "table", + "html": "
AttacksMethodOSYearStrategySettingDatasetsTarget ModelsMetrics
Model ExtractionCarlini et al. [503]Yes2024Binary SearchBlack-boxNoneGPTs, LLaMA, Pythia, ada, babbageQuery&TokenCost, MSE, RMS
Finlayson et al. [504]No2024Softmax BottleneckBlack-boxNonePythia, GPT-3.5Query Cost
Zanella et al. [505]No2024Matrix OperationsGrey-boxSST-2, MNLI, AGNewsBERTs, XLNetQuery Cost, Acc,Agreement
Horwitz et al. [506]Yes2024Spectral DeTuningWhite-boxLoWRAViT, SD, MistralMSWE, SEM
Membership InferenceMIN-K% PROB [509]Yes2023ProbabilitiesBlack-boxWikipediaLLaMAs, Pythia, NeoX,OPTTPR, FPR, ROC,AUC
MIN-K%++ [510]Yes2022Local MaximaBlack-boxWikiMIA, MIMIRPythia, GPT-NeoXLLaMA, OPT, MambaAUROC, TPR, FPR
Blind [511]Yes2024ThresholdBlack-box8 setsGPT-3, OpenLLaMAAUC ROC
LLM-DI [512]Yes2024AggregationBlack-boxPILEPythiasAUC, p-values
DE-COP [513]Yes2024ParaphrasesBlack-boxarXiv:Tection, BookTectionMistral, Mixtral, LLaMA, GPTs, ClaudeAUC
Recall [514]Yes2024Log-LikelihoodsBlack-boxWikiMIA, MIMIRPythia, GPT-NeoXLLaMA, OPT, MambaAUC, TPR@FPR
Noisy [515]No2024Embedding NGBRsGray-boxOpenWebText,WikipediaGPT-2TPR, FPR, AUC
SMIA [516]No2024PerturbationGray-boxWikipedia, FANPythia, Pythia-Deduped, GPT-NeosAUC-ROC, TPR, FPR
FEATAGG [517]No2024Feature AggregationBlack-boxProjectGutenberg,ArXivOpenLLaMATPR@FPR, AUC
RAG-MIA [520]No2024Direct AskingBlack-boxHealthCareMagic,Enronflan, llama, mistralTPR@FPR, AUC-ROC
JailbreakGCG [260]Yes2023Gradient-basedWhite-boxVicuna, LLaMA-2AdvBenchASR, Loss
AmpleGCG [530]Yes2024Hybrid-basedWhite-boxVicuna, Llama-2,Mis-tral,GPTsAdvBenchASR, US, Diver-sity, Time
I-GCG [531]Yes2024Gradient-basedWhite-boxAdvBench,HarmBenchVICUNA, GUANACOLLAMA, MISTRALASR
MA-GCG [532]Yes2024Gradient-basedWhite-boxAdvBenchVicuna, MistralASR, Time
A-GCG [533]Yes2024Gradient-basedWhite-boxAdvBenchLlama2, VicunaASR, Acc
AutoDAN-A [534]Yes2023LLM-basedBlack-boxAdvBenchVicuna, MistralASR, Recheck,PPL
AutoDAN-B [535]Yes2023Gradient-basedWhite-boxAdvBenchVicuna, Guanaco, PythiaASR, Recheck
PAIR [261]Yes2023LLM-basedBlack-boxJailbreakBenchVicuna, Llama-2, GPTs,Claudes,GeminiASR, QPS
ToA [536]Yes2023LLM-basedBlack-boxAdvBench, Harm123Vicuna, Llama-2, PaLM-2,GPTs, Claude3, GeminiGPT4-MetricHuman-Judge
PAL [537]Yes2024LLM-basedBlack-boxAdvBenchLlama-2, GPT-3.5ASR, Manual Labeling
Masterkey [538]No2023RephrasingBlack-boxAdvBench, Harm123GPTs, Bing, BardASR, QSR
AutoDAN-Turbo [539]Yes2024LLM-basedBlack-boxHarmbenchLlama-2, Gemma, GPT-4,GeminiASR, StrongRE-JECT
FlipAttack [540]Yes2025RephrasingBlack-boxAdvBench, StrongRE-JECTGPTs, Claude 3.5 Sonnet, Llama 3.1 405B, Mixtral 8x22BASR
Geneshift [541]Yes2025LLM-basedBlack-boxAdvBenchGPTsASR
Prompt InjectionIPP [542]Yes2022HandcraftBlack-boxOpenAI Examplestext-davinciASR
Greshake et al. [543]Yes2023Data PoisoningBlack-boxNonetext-davinci, GPT-4None
HOUYI [544]Yes2023Components AsmblBlack-boxFive QueriesSUPERTOOLSManual
Yan et al. [130]Yes2023PoisoningBlack-boxSeveral CasesAlpacaNgt, Pst, Ocrc
TT [545]No2023GameBlack-boxTensor TrustGPTs, Claudes, PaLM, LLaMAsRobustness Rate
JudgeDeceiver [546]Yes2024Gradient-basedWhite-boxMT-Bench, LLMBarMRPC, Jfleg, HSOL,RTE, SST2, SMSMistral, Openchat, LlamasACC, ASR, PACKEY-E, LM-E
AUPI [547]Yes2024Gradient-basedWhite-boxMRPC, Jfleg, HSOL,RTE, SST2, SMSLlama2ASR
AUTOHIJACKER [548]No2024LLM-basedBlack-boxAgentDojo, OPILlama, Command-R,GPTsASR
Data Extractionzlib [108]Yes2020Generate & InferenceBlack-boxTop-n, Temperature, InternetGPT-26 metrics
AutoSklearn [549]No2023Greedy, Contrastive, Beam decodingBlack-boxPileGPT-NeoPrecision, Recall,R@FPR
DECOM [550]No2024DecompositionBlack-boxNYT, WSJFrontiersTRM, EMP,BITAP
Context [551]No2022Context, Zero-shot,Few-shotBlack-boxEnron CorpusGPT-NeoAcc
ETHICIST [552]Yes2023Prompt TuningGray-boxLM-ExtractionGPT-NeoRecall
PII-Compass [553]No2024GroundingBlack-boxEnron emailGPT-JExtraction Rate
DSP [554]No2024Dynamic Soft PromptingBlack-boxLMEB, The StackGPT-Neo, Pythia, Star-CoderBaseEER, FER, PPL
PWB [555]Yes2024Gradient-basedWhite-boxPilePythia, LlamaPrecision, AUC,TPR
Prompt StealingSha et al. [556]No2024LLM-basedBlack-boxRetrievalQA,AlpacaGPT4ChatGPT, LLaMAAcc, Precision, Recall, AUC
output2prompt [557]Yes2024LLM-basedBlack-box3 User & 3 SystemPromptsLlamas, GPTsBLEU, CS, Preci-sion, Recall
PRSA [558]No2024Output DifferenceBlack-boxCategory18GPTsBLEU, FastKAS-SIM, JS
", + "image_path": "1b920951359c81b1de588e6f741d867a8d8080726518d9c47818f949e3c7423e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 41, + 300, + 88 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 41, + 300, + 88 + ], + "spans": [ + { + "bbox": [ + 52, + 41, + 300, + 88 + ], + "type": "text", + "content": "ECLIPSE [588] uses an LLM as a suffix generator and optimizer. PAL [537] proposes an online proxy model (which is used for adversarial prompt generation) training pipeline." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 41, + 88, + 301, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 88, + 301, + 170 + ], + "spans": [ + { + "bbox": [ + 41, + 88, + 301, + 170 + ], + "type": "inline_equation", + "content": "\\Rightarrow" + }, + { + "bbox": [ + 41, + 88, + 301, + 170 + ], + "type": "text", + "content": " Others. EnJa [589] proposes to ensemble prompt and token-level attack methods via a template-based connector. AmpleGCG [530] first collects lots of successful suffixes and then trains the generative model to generate a specific suffix for a given unsafe prompt. Zhao et al. 
[590] targets the scenario where the decoding process of target LLM is assisted with smaller models' guidance." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 41, + 175, + 301, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 175, + 301, + 256 + ], + "spans": [ + { + "bbox": [ + 41, + 175, + 301, + 256 + ], + "type": "text", + "content": "Prompt Injection Attacks. Prompt injection is a vulnerability where an attacker manipulates the input prompts of LLMs to force them to generate a specific output, which is usually out of the range for normal use (e.g., goal hijacking and prompt leaking [542]), often by injecting malicious text or commands into the input field. Attackers can employ a variety of techniques to carry out such attacks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 41, + 261, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 41, + 261, + 301, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 261, + 301, + 469 + ], + "spans": [ + { + "bbox": [ + 41, + 261, + 301, + 469 + ], + "type": "inline_equation", + "content": "\\Rightarrow" + }, + { + "bbox": [ + 41, + 261, + 301, + 469 + ], + "type": "text", + "content": " Direct Prompt Injection. Perez et al. [542] directly inject handcrafted adversarial prompts into inputs to misalign the language model. HOUYI [544] proposes an injection generation framework which includes three components. Yan et al. [130] utilize LLMs to generate diverse trigger instructions that implicitly capture the characteristics of trigger scenarios. TENSOR TRUST leverages the TENSOR TRUST web game to generate a large-scale dataset and benchmark [545]. AUPI [547] adopts a gradient-based optimization method, specifically, a momentum-enhanced optimization algorithm, to generate universal prompt injection data. Upadhayay et al. 
[591] argue that LLMs suffer from cognitive overload and propose to use in-context learning to jailbreak LLMs through deliberately designed prompts that induce cognitive overload. Kwon et al. [592] circumvent security policies by substituting sensitive words—likely to be rejected by the language model—with mathematical functions." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 41, + 469, + 301, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 469, + 301, + 700 + ], + "spans": [ + { + "bbox": [ + 41, + 469, + 301, + 700 + ], + "type": "inline_equation", + "content": "\\nLeftrightarrow" + }, + { + "bbox": [ + 41, + 469, + 301, + 700 + ], + "type": "text", + "content": " Indirect Prompt Injection. Greshake et al. [543] propose to indirectly inject prompts into the data that are likely to be retrieved. Bagdasaryan et al. [593] design a prompt injection attack against multi-modal LLMs, by generating an adversarial perturbation corresponding to the prompt and blending it into an image or audio recording. Neural Exec [594] designs a multi-stage preprocessing pipeline for cases like Retrieval-Augmented Generation (RAG)-based applications. PoisonedAlign [595] boosts the success of prompt injection attacks by strategically creating poisoned alignment samples in the LLM's alignment process. TPIA [596] crafts non-functional perturbations that contain malicious information and inserts them into the victim's code context by spreading them into potentially used dependencies like packages or RAG's knowledge base. F2A [597] proposes to use feign security detection agents to bypass the defense mechanism of LLMs. AUTOHIJACKER [548] uses a batch-based optimization framework to handle sparse feedback and leverages a trainable memory to enable effective generation." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 41, + 700, + 301, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 41, + 700, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 41, + 700, + 301, + 747 + ], + "type": "text", + "content": "Different Settings. JudgeDeceiver uses gradient-based optimization to inject LLM-as-a-Judge scenarios [546]. Pedro et al. [598] study the risk of injections targeting web applications based on the Langchain framework. Lee et" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 41, + 564, + 124 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 41, + 564, + 124 + ], + "spans": [ + { + "bbox": [ + 315, + 41, + 564, + 124 + ], + "type": "text", + "content": "al. [599] propose a human-AI collaborative framework to explore the potential of prompt injection against federated military LLMs. PROMPT INFECTION [600] proposes to make malicious prompts self-replicate across interconnected agents in multi-agent systems. Zhang et al. [601] explore the risk of prompt injection in LLM-integrated systems like LLM-integrated mobile robotic systems." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 129, + 565, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 129, + 565, + 186 + ], + "spans": [ + { + "bbox": [ + 308, + 129, + 565, + 186 + ], + "type": "text", + "content": "Data Extraction Attacks. Data extraction attacks try to figure out the personally identifiable information (PII) that is used to train the LLMs [108]. It starts from sufficient-length prefixes to perform extraction and additional measures to determine if extracted texts are valid." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 306, + 192, + 565, + 747 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 307, + 192, + 565, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 192, + 565, + 479 + ], + "spans": [ + { + "bbox": [ + 307, + 192, + 565, + 479 + ], + "type": "inline_equation", + "content": "\\nLeftrightarrow" + }, + { + "bbox": [ + 307, + 192, + 565, + 479 + ], + "type": "text", + "content": " Methods. In the beginning work [108], the proposed extraction process contains two stages \"generate-then-rank\": sampling potentially memorized examples and membership inference. It proposes a temperature-decaying method to sample more diverse examples and use surrogate models to infer the membership. After that, Al-Kaswan et al. [549] propose using greedy, contrastive, and beam decoding strategies to generate examples and use a classifier to infer the membership. Su et al. [550] propose an instruction decomposition technique to extract fragments of training data gradually. Huang et al. [551] extensively explore the effect of context, zero-shot, and few-shot methods in extracting the personal email address. ETHICIST proposes a smoothing loss and a calibrated confidence estimation method to extract the suffix and measure the confidence [552]. Nakka et al. [553] improves the extraction performance by grounding the prefix of the manually constructed extraction prompt with in-domain data. Wang et al. [554] propose to train a transformer-based generator to produce dynamic, prefix-dependent soft prompts. Ozdayi et al. [105] introduce an approach that uses prompt tuning to control the extraction rates of memorized content. Meng et al. [602] propose a two-stage method, i.e., collection and ranking, to recover PPI when PII entities have been masked." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 481, + 565, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 481, + 565, + 734 + ], + "spans": [ + { + "bbox": [ + 307, + 481, + 565, + 734 + ], + "type": "text", + "content": "Different Settings. Some works also explore the risk of data leakage in novel settings. Wang et al. [555] study the probability of data extraction in fine-tuning settings and Bargav et al. [603, 604] extract the training data by comparing the output difference before and after the fine-tuning. Jiang et al. [605, 606, 607] propose to extract the private Retrieval-Augmented Generation (RAG) documents. Peng et al. [608] extract the private RAG documents by poisoning in the fine-tuning process. Nasr et al. [107] explore the potential risk of data extraction for the aligned production language models. Panda et al. [609] extract the fine-tuning secret data by poisoning the pertaining dataset. Lu et al. [610] propose to extract PII from an aligned model with model merging. Chen et al. [611] find that fine-tuning can recover the forgotten PIIs in pretraining data. Panchendrarajan et al. [612] propose to extract the whole private training data in the fine-tuning process. Rashid et al. [613] propose selective weight tampering to explore PPI leakage in Federated Language Models. Dentan et al. [614] extract data from layout-aware document understanding models like unimodal or bimodal models." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 306, + 734, + 564, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 306, + 734, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 306, + 734, + 564, + 747 + ], + "type": "text", + "content": "Different Applications. 
Leveraging the abnormally high" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 42, + 301, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 42, + 301, + 227 + ], + "spans": [ + { + "bbox": [ + 52, + 42, + 301, + 227 + ], + "type": "text", + "content": "token probabilities, some works utilize the memorization of LLMs to extract the fingerprint or steganography [615]. Al-Kaswan et al. [616] explore memorization in large language models for code and find that code models memorize training data at a lower rate than natural language models. Nie et al. [617] utilize the token-level features derived from the identified characteristics to decode the PII. Lehman et al. [618] reveal the risk of Electronic Health Records leakage of LLMs. Diera et al. [619] conduct experiments to assess the PII leakage of fine-tuned BERT models and found that Differential Privacy (DP) has a negative effect when deployed in fine-tuning. Zhang et al. [620] propose data extraction attacks against text classification with transformers. Huang et al. [621] propose an evaluation tool, i.e. HCR, to assess the PPI leakage in Neural Code Completion Tools." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 42, + 228, + 301, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 42, + 228, + 301, + 376 + ], + "spans": [ + { + "bbox": [ + 42, + 228, + 301, + 376 + ], + "type": "inline_equation", + "content": "\\nrightarrow" + }, + { + "bbox": [ + 42, + 228, + 301, + 376 + ], + "type": "text", + "content": " Factor Assessment. Some work studies the factors of data extraction including decoding schemes, model sizes, prefix lengths, partial sequence leakages, and token positions [622, 623]. Yash et al. [624] explore the effects of prompt sensitivity and access to multiple checkpoints to extraction attacks. Staab et al. [625] construct a dataset consisting of real Reddit profiles to extract personal attributes. Xu et al. [626] conduct experiments to evaluate the factors of different suffix generation methods and different membership inference attacks in extraction performance. Karamolegkou et al. [627] evaluate the effect of model structure, data type, probing strategies, and metrics." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 378, + 301, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 378, + 301, + 586 + ], + "spans": [ + { + "bbox": [ + 44, + 378, + 301, + 586 + ], + "type": "text", + "content": "Prompt Stealing Attacks. Given that crafting effective prompts requires significant engineering effort and can be considered valuable intellectual property (IP), promptstealing attacks aim to compromise this IP by reconstructing prompts from generated responses [556, 557, 558]. These generation effects are often used to attract prospective prospective buyers. Sha et al. [556] pioneer this approach by collecting a dataset and training classifiers to predict prompt parameters—such as whether the prompt is direct, role-based, or in-context. They then used a large language model (LLM) to reconstruct the prompt. Similarly, Zhang et al. 
[557] trained an LLM on output-prompt pairs to directly infer the original prompt, while Yang et al. [558] leveraged generation differences to refine surrogate prompts. However, recovering the original prompt solely from the output is challenging. Out of this, Zheng et al. [628] propose a timing-based side-channel method to infer the prompt during inference." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 594, + 241, + 606 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 594, + 241, + 606 + ], + "spans": [ + { + "bbox": [ + 45, + 594, + 241, + 606 + ], + "type": "text", + "content": "6.1.2 Defensive Mechanisms in Deployment" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 608, + 300, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 608, + 300, + 723 + ], + "spans": [ + { + "bbox": [ + 44, + 608, + 300, + 723 + ], + "type": "text", + "content": "In Subsubsection 6.1.1, we analyzed various attack scenarios targeting individual LLM deployments. However, in real-world applications, defense mechanisms are not designed as isolated, one-to-one countermeasures against specific attacks. Instead, they follow fundamental security principles to establish a systematic defense framework, as illustrated in Figure 9. This framework integrates multiple layers of protection, ensuring resilience against a wide range of adversarial threats while maintaining model usability and efficiency." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 723, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 723, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 723, + 301, + 748 + ], + "type": "text", + "content": "Input Preprocessing Defenses Input preprocessing serves as the first line of defense in LLM deployment, aiming to" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 310, + 57, + 560, + 126 + ], + "blocks": [ + { + "bbox": [ + 310, + 46, + 467, + 57 + ], + "lines": [ + { + "bbox": [ + 310, + 46, + 467, + 57 + ], + "spans": [ + { + "bbox": [ + 310, + 46, + 467, + 57 + ], + "type": "text", + "content": "Defensive Mechanisms in Deployment" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 310, + 57, + 560, + 126 + ], + "lines": [ + { + "bbox": [ + 310, + 57, + 560, + 126 + ], + "spans": [ + { + "bbox": [ + 310, + 57, + 560, + 126 + ], + "type": "image", + "image_path": "f4ce229cb80a8c96656ded2aa655c39cbe884dd49ac1e9e0e1650264348f5451.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 133, + 564, + 158 + ], + "lines": [ + { + "bbox": [ + 309, + 133, + 564, + 158 + ], + "spans": [ + { + "bbox": [ + 309, + 133, + 564, + 158 + ], + "type": "text", + "content": "Fig. 9: The overview of attacks in single LLM's deployment phase." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 178, + 564, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 178, + 564, + 201 + ], + "spans": [ + { + "bbox": [ + 308, + 178, + 564, + 201 + ], + "type": "text", + "content": "detect and neutralize adversarial inputs before they reach the model." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 202, + 564, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 202, + 564, + 399 + ], + "spans": [ + { + "bbox": [ + 307, + 202, + 564, + 399 + ], + "type": "text", + "content": "Attack Detection & Identification: Effective input filtering [629, 630] begins with attack detection [631], which identifies adversarial prompts through statistical [632], structural [633], or behavioral inconsistencies [634]. Gradient-based detection methods [635] leverage safety-critical gradient analysis and loss landscape exploration to uncover jailbreak prompts that manipulate LLM behavior. These approaches identify adversarial inputs [636, 637] by analyzing how small perturbations [638] affect model outputs, detecting highly sensitive or misaligned gradients that indicate targeted attacks. Perplexity-based methods [632, 632] measure the probability distribution of input sequences, flagging atypical or low-likelihood prompts as potential adversarial inputs. These techniques are particularly effective in detecting prompt injection and adversarial perturbations, where crafted prompts deviate significantly from natural language distributions." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 399, + 564, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 399, + 564, + 491 + ], + "spans": [ + { + "bbox": [ + 307, + 399, + 564, + 491 + ], + "type": "text", + "content": "Beyond individual heuristics, universal detection frameworks [639] integrate multiple detection strategies to counter diverse attack vectors, including prompt injection [640], backdoor manipulations [641], and adversarial attacks [637]. These frameworks employ ensemble-based filtering mechanisms, combining gradient analysis [642], perplexity estimation [643], and syntactic evaluation for generalized attack resilience." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 492, + 564, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 492, + 564, + 689 + ], + "spans": [ + { + "bbox": [ + 307, + 492, + 564, + 689 + ], + "type": "text", + "content": "Semantic & Behavioral Analysis: Attack detection alone is insufficient, as certain adversarial inputs may bypass traditional filtering mechanisms. Semantic [644] and behavioral analysis enhance input preprocessing by evaluating linguistic intent and model alignment. Self-examination techniques allow LLMs [645, 646] to assess whether they are being manipulated, leveraging auxiliary reasoning steps to detect deceptive prompts. Alignment-based verification [647] ensures that the model's responses remain consistent with its safety objectives [330], identifying inputs that subtly nudge the model toward policy violations or ethical misalignment. Intention analysis [648, 649] further refines input filtering by discerning subtle manipulations designed to bypass explicit security checks. Unlike token-level detection, which flags overtly adversarial inputs, intention-aware defenses analyze the semantic structure and purpose of the input to preemptively reject jailbreak attempts." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 689, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 689, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 308, + 689, + 564, + 748 + ], + "type": "text", + "content": "Adversarial Defense & Mitigation: When detection and behavioral analysis fail to fully neutralize adversarial inputs, robustness-enhancing techniques [647] mitigate their effects by reducing model susceptibility to manipulation [334, 650]. 
Semantic smoothing [651, 652] techniques" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 112 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 112 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 112 + ], + "type": "text", + "content": "introduce controlled randomness into LLM responses, reducing the model's sensitivity to adversarial perturbations and preventing reliable jailbreak execution. By stabilizing decision boundaries [653], these methods enhance resistance against prompt manipulation strategies that exploit response predictability." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 112, + 301, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 112, + 301, + 204 + ], + "spans": [ + { + "bbox": [ + 44, + 112, + 301, + 204 + ], + "type": "text", + "content": "Preemptive input transformations [654], such as back-translation [655] or paraphrasing, modify incoming queries [651] while preserving semantic intent, disrupting adversarial structures embedded within malicious prompts. 
Data augmentation [656] and adversarial training further strengthen model robustness by exposing LLMs to adversarial prompts during training, forcing them to learn invariances that reduce their vulnerability to real-world attacks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 204, + 301, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 204, + 301, + 308 + ], + "spans": [ + { + "bbox": [ + 44, + 204, + 301, + 308 + ], + "type": "text", + "content": "Output Filtering Mechanisms. Output filtering mechanisms [212, 657] serve as a critical safeguard in LLM deployment, ensuring that generated responses comply with safety constraints while preserving informativeness. Unlike input preprocessing, which aims to prevent adversarial prompts from reaching the model, output filtering mitigates harmful content post-generation. Existing approaches primarily follow three paradigms: rule-based constraints, generative adversarial filtering, and toxicity detection." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 308, + 301, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 308, + 301, + 434 + ], + "spans": [ + { + "bbox": [ + 47, + 308, + 301, + 434 + ], + "type": "text", + "content": "Rule-based mechanisms [658] impose predefined constraints on model outputs, preventing the generation of harmful, unethical, or undesired content. Programmable guardrails [659] offer a structured framework where developers can enforce response filtering, topic restriction, and ethical alignment. These methods often integrate reinforcement learning from human feedback [155] or rule-based reward [660] modeling to refine output safety. While effective at handling explicit violations, static rule-based methods struggle with nuanced adversarial prompts and subtle misalignments." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 435, + 301, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 435, + 301, + 584 + ], + "spans": [ + { + "bbox": [ + 44, + 435, + 301, + 584 + ], + "type": "text", + "content": "To address these limitations, generative adversarial filtering [661] leverages self-critique [662, 663], ensemble detection, and dynamic response evaluation [664]. Self-rectification mechanisms [663, 665] enable LLMs to critique their own outputs and refine responses through iterative refinement. Additionally, ensemble-based [666] moderation models aggregate predictions from multiple LLMs, improving robustness against circumvention techniques. Adaptive filtering frameworks [667] employ perplexity-based assessments and adversarial perturbation detection to flag responses deviating from expected linguistic patterns, enhancing their resilience against jailbreak attempts [668, 669] and toxic content injection." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 585, + 301, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 585, + 301, + 723 + ], + "spans": [ + { + "bbox": [ + 44, + 585, + 301, + 723 + ], + "type": "text", + "content": "Toxicity detection [670, 671, 672] and content moderation [673, 674, 675, 676] further reinforce output safety by identifying and mitigating hate speech [677], misinformation, and other harmful content. Supervised finetuning adapts LLMs to recognize undesirable patterns, while classifier-based detection models [678] filter responses in real-time. Some approaches introduce debiasing strategies, such as controlled decoding [679, 680] and anti-expert guidance [681], to suppress toxic outputs without sacrificing response diversity. However, these methods face challenges in balancing false positives and false negatives, particularly in ambiguous or context-dependent cases." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 723, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 723, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 45, + 723, + 301, + 746 + ], + "type": "text", + "content": "The effectiveness of output filtering hinges on its ability to balance strict control with linguistic flexibility, ensur" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 42, + 564, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 564, + 168 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 564, + 168 + ], + "type": "text", + "content": "ing that models remain both safe and practically useful. A hybrid approach combining rule-based safeguards, self-correcting mechanisms, and adaptive toxicity moderation is essential to achieving robust and scalable LLM deployment. Robust Prompt Engineering. Robust prompt engineering aims to enhance LLM safety by designing input prompts that resist adversarial manipulation [682], protect sensitive data, and mitigate harmful outputs—all [683] without modifying model parameters. These strategies act at the interaction level, offering lightweight and model-agnostic protection." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 169, + 565, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 169, + 565, + 365 + ], + "spans": [ + { + "bbox": [ + 308, + 169, + 565, + 365 + ], + "type": "text", + "content": "Recent efforts have introduced prompt optimization techniques grounded in adversarial robustness, including embedding-space manipulation and defensive objective alignment. Methods such as Robust Prompt Optimization [684] and Prompt Adversarial Tuning generate transferable suffixes [668] or prefix [685] embeddings to guide model behavior [686] under attack [687], effectively lowering jailbreak success rates while preserving task performance. 
Similarly, goal prioritization frameworks [688] enforce inference-time objective consistency, dynamically resolving conflict between user instructions and safety constraints without requiring access to malicious samples. Complementary to these strategies, patch-based methods integrate interpretable suffixes or structured self-reminders [689] into prompts, reducing the model's susceptibility to coercive inputs through lightweight, modular defenses." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 365, + 565, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 365, + 565, + 435 + ], + "spans": [ + { + "bbox": [ + 308, + 365, + 565, + 435 + ], + "type": "text", + "content": "Structural manipulation approaches [690] neutralize adversarial intent through prompt rewriting. Spotlighting [691] injects source-attribute signals to counter indirect prompt injection, while inverse prompt engineering [692] repurposes attack data to generate task-specific defensive prompts under the principle of least privilege." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 435, + 565, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 435, + 565, + 573 + ], + "spans": [ + { + "bbox": [ + 308, + 435, + 565, + 573 + ], + "type": "text", + "content": "Privacy-preserving prompt [693] design introduces formal guarantees through differential privacy. Approaches like DP-Prompt [694] and stochastic gradient masking [695] reduce information leakage from prompts without harming performance. Desensitization and directional control of incontext representations offer additional privacy protections during prompt construction. Prompt engineering [579, 696] also helps mitigate societal risks. 
Chain-of-thought prompting and guided templates reduce gender bias [697] in reasoning tasks, while prompt learning [698] improves toxicity detection and generation control [699, 700], often surpassing specialized models in efficiency and generalization." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 573, + 564, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 573, + 564, + 643 + ], + "spans": [ + { + "bbox": [ + 308, + 573, + 564, + 643 + ], + "type": "text", + "content": "Finally, systematic prompt optimization methods [701, 702] aim to generalize prompt robustness across tasks and domains. Techniques like BATPrompt [703] and StraGo [704] use adversarial simulation and strategic decomposition to refine prompts iteratively, improving both resilience and effectiveness under variable inputs." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 643, + 565, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 643, + 565, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 643, + 565, + 746 + ], + "type": "text", + "content": "System-level Security Controls. System-level defenses [705] enhance LLM deployment by optimizing inference, enforcing alignment, isolating untrusted inputs, and securing the supply chain. Systems like Petals [706], Sarathi-Serve [707], and DistServe [708] restructure computation to improve throughput and latency, while TriForce [709], Medusa [710] MagicDec [711] accelerate generation via speculative decoding and structural compression. Parallel frameworks such as DeepSpeed-FastGen [712] and SpecExec [713] further boost" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 192, + 53 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 192, + 53 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 192, + 53 + ], + "type": "text", + "content": "efficiency with minimal overhead." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 53, + 300, + 135 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 53, + 300, + 135 + ], + "spans": [ + { + "bbox": [ + 44, + 53, + 300, + 135 + ], + "type": "text", + "content": "Runtime alignment methods [714] adapt model behavior through cross-model guidance or token-level reward modeling. Systems such as SelfDefend [715] and Gradient Cuff [716] detect unsafe generation by monitoring agreement across models or loss landscapes, while Spotlighting [691] inserts provenance signals to mitigate indirect prompt injection." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 135, + 301, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 135, + 301, + 194 + ], + "spans": [ + { + "bbox": [ + 44, + 135, + 301, + 194 + ], + "type": "text", + "content": "Access isolation is achieved through policy enforcement [717] and system wrappers [688]. At the supply level, tools like MalHug [718] identify poisoned models, while system audits reveal sandbox and plugin vulnerabilities, highlighting the need for end-to-end secure deployment." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 194, + 300, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 194, + 300, + 299 + ], + "spans": [ + { + "bbox": [ + 44, + 194, + 300, + 299 + ], + "type": "text", + "content": "LLM-based guard models utilize lightweight LLMs like Llama Guard [330], Aegis Guard [719, 720], WildGuard [721], and ShieldGemma [722] to moderate both the input and output of the victim LLMs. However, they are purely classifiers. To solve this problem, the first reasoning-based guard model named GuardReasoner [723] is proposed to improve the performance, explainability, and generalization ability via learning to reason. It brings new opportunities for the safety of large-scale reasoning models [724]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 311, + 261, + 323 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 311, + 261, + 323 + ], + "spans": [ + { + "bbox": [ + 45, + 311, + 261, + 323 + ], + "type": "text", + "content": "6.1.3 Evaluation and Benchmarks in Deployment" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 326, + 301, + 385 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 326, + 301, + 385 + ], + "spans": [ + { + "bbox": [ + 44, + 326, + 301, + 385 + ], + "type": "text", + "content": "To assess the reliability and safety of LLMs after deployment, evaluation efforts focus on several key dimensions and risk types, as illustrated in Figure 10. These dimensions guide the design of systematic benchmarks and metrics tailored for real-world deployment settings." 
+ } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 47, + 398, + 299, + 517 + ], + "blocks": [ + { + "bbox": [ + 47, + 398, + 299, + 517 + ], + "lines": [ + { + "bbox": [ + 47, + 398, + 299, + 517 + ], + "spans": [ + { + "bbox": [ + 47, + 398, + 299, + 517 + ], + "type": "image", + "image_path": "7e5d6796694a8d1a706054c8a700440e2e26505bd752ab4e4efae519e5f05197.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 524, + 292, + 548 + ], + "lines": [ + { + "bbox": [ + 54, + 524, + 292, + 548 + ], + "spans": [ + { + "bbox": [ + 54, + 524, + 292, + 548 + ], + "type": "text", + "content": "Fig. 10: The overview of evaluation and benchmarks in single LLM's deployment phase." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 559, + 299, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 559, + 299, + 571 + ], + "spans": [ + { + "bbox": [ + 45, + 559, + 299, + 571 + ], + "type": "text", + "content": "Robustness Evaluation. To systematically assess the relia-" + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 46, + 607, + 299, + 744 + ], + "blocks": [ + { + "bbox": [ + 48, + 580, + 297, + 604 + ], + "lines": [ + { + "bbox": [ + 48, + 580, + 297, + 604 + ], + "spans": [ + { + "bbox": [ + 48, + 580, + 297, + 604 + ], + "type": "text", + "content": "TABLE 7: Summary of LLM robustness benchmarks at the deployment stage." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 46, + 607, + 299, + 744 + ], + "lines": [ + { + "bbox": [ + 46, + 607, + 299, + 744 + ], + "spans": [ + { + "bbox": [ + 46, + 607, + 299, + 744 + ], + "type": "table", + "html": "
BenchmarkAdversarialNaturalJailbreakToxicity
JailbreakBench [306]
HarmBench [305]
JAMBench [725]
JailbreakEval [726]
Latent Jailbreak [727]
PromptRobust [728]
SelfPrompt [729]
Chen et al. [730]
Chu et al. [731]
AdvGLUE [732]
AdvGLUE++ [333]
NoiseLLM [733]
NEO-BENCH [734]
CompressionEval [735]
", + "image_path": "5e09ecec36584d1cf538edd7e63fbbb1fa61ac0fa57f6f3644e5882b53355973.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 42, + 564, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 564, + 169 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 564, + 169 + ], + "type": "text", + "content": "bility of large language models (LLMs) after deployment, we categorize robustness evaluation into two broad types: adversarial robustness and natural robustness. Adversarial robustness focuses on evaluating how LLMs respond to malicious or adversarial inputs, such as jailbreak prompts, prompt injections, or red-teaming attacks. Natural robustness, on the other hand, assesses LLM behavior under nonmalicious but realistic distribution shifts, including typos, paraphrasing, novel word usage, or temporal drift. A summary of representative benchmarks categorized along these 4 dimensions is presented in Table 7." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 169, + 564, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 169, + 564, + 550 + ], + "spans": [ + { + "bbox": [ + 307, + 169, + 564, + 550 + ], + "type": "text", + "content": "Adversarial Robustness: A range of benchmarks and frameworks have been proposed for adversarial robustness. JailbreakBench [306] provides a standardized evaluation suite for jailbreak attacks, containing 100 misuse behaviors and an evolving repository of adversarial prompts. HarmBench [305] proposes a comprehensive red-teaming evaluation framework that includes 510 harmful behaviors spanning diverse semantic and functional categories, supporting both text-only and multimodal inputs across 33 LLMs. JAMBench [725] targets the evaluation of moderation guardrails using 160 carefully constructed prompts across four major risk categories and introduces a cipher-character-based attack. 
JailbreakEval [726] offers a unified toolkit for jailbreak assessment with string-matching, classifier-based, and LLM-based evaluators. Latent Jailbreak [727] focuses on detecting embedded malicious intent in seemingly benign prompts and evaluates instruction-following robustness using a hierarchical annotation scheme. PromptRobust [728] benchmarks prompt-level robustness with character, word, sentence, and semantic-level perturbations across 13 datasets and 8 NLP tasks. SelfPrompt [729] enables autonomous robustness evaluation through knowledge-guided prompt generation and LLM-based self-assessment. Chu et al. [731] conduct a large-scale comparison of 17 jailbreak attacks on 8 LLMs and 160 forbidden prompts, proposing a unified taxonomy and benchmarking various defenses. Chen et al. [730] propose a multi-dimensional framework assessing jailbreak reliability over 13 LLMs and 1,525 prompts, integrating metrics such as attack success rate (ASR), toxicity, fluency, and grammatically. Zhang et al. [736] propose a novel definition and benchmark for LLM's content moderation based on a sensitive-semantic perspective." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 550, + 564, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 550, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 307, + 550, + 564, + 747 + ], + "type": "text", + "content": "Natural Robustness: Several benchmarks focus on evaluating LLMs under realistic but benign input perturbations or distribution shifts. AdvGLUE [732] and AdvGLUE++ [333] extend the original GLUE benchmark [737] with semantically-preserving perturbations at logic, word, and sentence levels. NoiseLLM [733] presents a unified framework for evaluating slot-filling robustness under character-, word-, and sentence-level noise, including typos and paraphrases. 
NEO-BENCH [734] assesses robustness to temporal drift by introducing neologisms into tasks such as machine translation, classification, and question answering. CompressionEval [735] provides a prompt-free evaluation framework using lossless compression to assess generalization and robustness, comparing LLM performance on content before and after the model's knowledge cutoff. These benchmarks offer complementary perspectives for assessing LLM performance under both malicious and naturally" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 43, + 161, + 53 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 43, + 161, + 53 + ], + "spans": [ + { + "bbox": [ + 45, + 43, + 161, + 53 + ], + "type": "text", + "content": "occurring input variations." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 53, + 300, + 65 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 53, + 300, + 65 + ], + "spans": [ + { + "bbox": [ + 45, + 53, + 300, + 65 + ], + "type": "text", + "content": "Content Trustfulness and Fairness Evaluation. 
Beyond ro" + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 46, + 100, + 301, + 209 + ], + "blocks": [ + { + "bbox": [ + 53, + 73, + 292, + 98 + ], + "lines": [ + { + "bbox": [ + 53, + 73, + 292, + 98 + ], + "spans": [ + { + "bbox": [ + 53, + 73, + 292, + 98 + ], + "type": "text", + "content": "TABLE 8: Summary of content trustfulness and fairness evaluation benchmarks for LLMs at deployment stage." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 46, + 100, + 301, + 209 + ], + "lines": [ + { + "bbox": [ + 46, + 100, + 301, + 209 + ], + "spans": [ + { + "bbox": [ + 46, + 100, + 301, + 209 + ], + "type": "table", + "html": "
BenchmarkHallucinationFactualityToxicityBiasDiscrimination
HaluEval [738]
Med-HALT [739]
ANAH [740]
SelfCheckGPT [741]
DoLa [742]
Mundler et al. [743]
Elaraby et al. [744]
Ji et al. [745]
Zhang et al. [746]
Guo et al. [747]
RTP-LX [748]
ROBBIE [749]
CEB [750]
", + "image_path": "494c56ce4c14d6ec67999ba68c4f9c1261aae972f1017e76671270ae0d772dfb.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 216, + 301, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 216, + 301, + 308 + ], + "spans": [ + { + "bbox": [ + 44, + 216, + 301, + 308 + ], + "type": "text", + "content": "bustness, a key dimension of deployment-stage evaluation concerns the trustfulness and fairness of LLM-generated content. This includes detecting and mitigating outputs that are factually incorrect (hallucinations), misleading (low factuality), harmful (toxic), or unfair (biased or discriminatory). We categorize existing benchmarks into five axes: hallucination, factuality, toxicity, bias, and discrimination, and summarize representative works in Table 8." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 308, + 301, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 308, + 301, + 492 + ], + "spans": [ + { + "bbox": [ + 44, + 308, + 301, + 492 + ], + "type": "text", + "content": "Benchmarks in this space target either the accuracy of generated content or its alignment with human values. For hallucination and factuality evaluation, HaluEval [738] and MedHALT [739] provide reference-based hallucination annotations in general and medical domains, respectively, while ANAH [740] delivers fine-grained, human-annotated hallucination labels with correction spans. SelfCheckGPT [741] detects hallucinations via consistency checks across multiple generations, and DoLa [742] proposes a decoding strategy that contrasts internal layer activations to reduce factual errors. Other works such as Mundler et al. [743], Elaraby et al. [744], and Ji et al. [745] leverage taxonomic definitions or internal model signals to quantify or predict hallucination risk. Zhang et al. 
[746] introduce FEWL, a reference-free evaluation framework that uses agreement across reference LLMs to approximate hallucination likelihood." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 492, + 301, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 492, + 301, + 573 + ], + "spans": [ + { + "bbox": [ + 44, + 492, + 301, + 573 + ], + "type": "text", + "content": "In terms of toxicity detection, Guo et al. [747] show that role-playing prompts (persons) can elicit toxic behavior from ChatGPT, and RTP-LX [748] evaluates multilingual LLMs in detecting culturally sensitive harm. Both studies reveal that current LLMs remain vulnerable to subtle toxic or culturally biased outputs, especially in low-resource languages or when confronted with indirect harm." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 44, + 573, + 301, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 573, + 301, + 666 + ], + "spans": [ + { + "bbox": [ + 44, + 573, + 301, + 666 + ], + "type": "text", + "content": "For evaluating social bias and discrimination, ROBBIE [749] benchmarks LLMs across 12 demographic axes with template-based prompts and multiple toxicity and regard metrics, covering gender, race, religion, and intersections thereof. CEB [750] proposes a compositional taxonomy for fairness evaluation and introduces multiple new datasets spanning stereotyping, toxicity, and classification bias, supporting both direct and indirect evaluation modes." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 44, + 666, + 301, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 666, + 301, + 734 + ], + "spans": [ + { + "bbox": [ + 44, + 666, + 301, + 734 + ], + "type": "text", + "content": "These benchmarks collectively provide a multidimensional view of content trustfulness and fairness, enabling the systematic evaluation of LLMs beyond syntactic correctness or surface fluency. 
As safety-critical deployment scenarios become increasingly prevalent, such evaluation tools play a central role in ensuring the responsible use of LLMs." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 734, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 734, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 45, + 734, + 301, + 746 + ], + "type": "text", + "content": "Data Privacy and Leakage Evaluation. Data privacy is" + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 318, + 73, + 553, + 186 + ], + "blocks": [ + { + "bbox": [ + 313, + 38, + 561, + 63 + ], + "lines": [ + { + "bbox": [ + 313, + 38, + 561, + 63 + ], + "spans": [ + { + "bbox": [ + 313, + 38, + 561, + 63 + ], + "type": "text", + "content": "TABLE 9: Summary of privacy evaluation benchmarks for LLMs at the deployment stage." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 73, + 553, + 186 + ], + "lines": [ + { + "bbox": [ + 318, + 73, + 553, + 186 + ], + "spans": [ + { + "bbox": [ + 318, + 73, + 553, + 186 + ], + "type": "table", + "html": "
BenchmarkPIIMIAEIACompliance
PrivLM-Bench [751]
LLM-PBE [752]
PrivAuditor [753]
Rossi et al. [754]
Whispered Tuning [755]
ProPILE [103]
PrivaCI-Bench [756]
Commercial Audit [757]
LessLeak-Bench [758]
SecureSQL [759]
DecodingTrust [333]
", + "image_path": "507c336b5a2d24ce18489c83891919090519cf1f20f6a7cceb030ba324f22d7d.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 204, + 564, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 204, + 564, + 274 + ], + "spans": [ + { + "bbox": [ + 308, + 204, + 564, + 274 + ], + "type": "text", + "content": "a critical dimension in evaluating the trustworthiness of LLMs at deployment. Table 9 summarizes representative benchmarks that assess privacy risks along four axes: personally identifiable information (PII) leakage, membership inference attacks (MIA), embedding inversion attacks (EIA), and regulatory or contextual compliance." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 274, + 564, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 274, + 564, + 365 + ], + "spans": [ + { + "bbox": [ + 308, + 274, + 564, + 365 + ], + "type": "text", + "content": "PrivLM-Bench [751] and LLM-PBE [752] offer comprehensive multi-level evaluations spanning all three major attack types. PrivAuditor [753] and Rossi et al. [754] focus on adaptation-stage vulnerabilities across a variety of finetuning techniques. Whispered Tuning [755] proposes a differential privacy-based training scheme to reduce leakage, while ProPILE [103] tests whether LLMs can reconstruct sensitive information from prompts related to known users." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 365, + 564, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 365, + 564, + 470 + ], + "spans": [ + { + "bbox": [ + 308, + 365, + 564, + 470 + ], + "type": "text", + "content": "PrivaCI-Bench [756] and Commercial Audit [757] emphasize regulatory compliance, evaluating model behavior against privacy expectations and legal frameworks such as GDPR and the EU AI Act. 
SecureSQL [759] examines leakage in structured query generation, and LessLeak-Bench [758] reveals code-specific leakage across software engineering benchmarks. Finally, DecodingTrust [333] includes privacy as part of a broader trustworthiness suite, auditing GPT models across multiple risk dimensions." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 470, + 564, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 470, + 564, + 504 + ], + "spans": [ + { + "bbox": [ + 308, + 470, + 564, + 504 + ], + "type": "text", + "content": "Together, these benchmarks provide a foundation for assessing LLM privacy risks across diverse modalities, attack surfaces, and deployment scenarios." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 504, + 564, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 504, + 564, + 596 + ], + "spans": [ + { + "bbox": [ + 308, + 504, + 564, + 596 + ], + "type": "text", + "content": "Multi-modal Safety Evaluations As multimodal large language models (MLLMs) become increasingly integrated into real-world applications, ensuring their safety under diverse input conditions is essential. A growing number of studies have proposed evaluation benchmarks and frameworks to assess MLLM vulnerabilities across multiple dimensions [760, 761, 762, 763, 764, 765, 766, 767, 768, 769, 770, 771, 772, 773, 774, 775, 776, 777, 778, 779, 780, 781, 782]." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 596, + 564, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 596, + 564, + 723 + ], + "spans": [ + { + "bbox": [ + 307, + 596, + 564, + 723 + ], + "type": "text", + "content": "Jailbreak evaluation has received significant attention, with benchmarks such as MM-SafetyBench [760] and Jailbreakv-28k [761] targeting harmful instruction-following behaviors. 
MMJ-Bench [762] and Retention Score [763] further extend jailbreak assessment to include visual robustness and long-term safety retention. For hallucination, several works diagnose MLLM failures arising from inconsistencies between visual inputs and generated text, including HallusionBench [764], POPE [765], and Bingo [766]. SIUO [767] complements this direction by evaluating cross-modality consistency under seemingly benign inputs." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 308, + 723, + 564, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 723, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 723, + 564, + 746 + ], + "type": "text", + "content": "Robustness under adversarial visual corruption is assessed in MVTamperBench [768] and B-AviBench [769]," + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 41, + 301, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 41, + 301, + 133 + ], + "spans": [ + { + "bbox": [ + 44, + 41, + 301, + 133 + ], + "type": "text", + "content": "which introduce perturbed or misleading visual stimuli to test model stability. 
Meanwhile, fairness and social bias have been evaluated through VIVA [770], GenderBiasVL [771], FACET [772], FairDeDup [773], CounterBias [774], PAIRS [775], DeAR [776], and MMBias [777], covering gender, racial, and intersectional dimensions using parallel image sets, counterfactual probing, and real-world dataset imbalances." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 135, + 301, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 135, + 301, + 217 + ], + "spans": [ + { + "bbox": [ + 44, + 135, + 301, + 217 + ], + "type": "text", + "content": "To unify these evaluation directions, several comprehensive frameworks have emerged. MultiTrust [778] and SPAVL [779] aim to benchmark MLLMs across diverse safety criteria, including robustness, fairness, and harmfulness. Q-Eval-100K [780] complements these efforts by focusing on visual generation quality and alignment under instruction-following settings." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 217, + 301, + 264 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 217, + 301, + 264 + ], + "spans": [ + { + "bbox": [ + 44, + 217, + 301, + 264 + ], + "type": "text", + "content": "Collectively, these benchmarks highlight the unique challenges posed by multimodal interactions and the growing need for holistic, scalable safety evaluations tailored to MLLMs." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 288, + 160, + 300 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 288, + 160, + 300 + ], + "spans": [ + { + "bbox": [ + 45, + 288, + 160, + 300 + ], + "type": "text", + "content": "6.2 Single-agent Safety" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 307, + 301, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 307, + 301, + 469 + ], + "spans": [ + { + "bbox": [ + 44, + 307, + 301, + 469 + ], + "type": "text", + "content": "In this section, we focus on security issues related to a single agent. We first define an agent as an interactive entity that uses an LLM as the core for reasoning, decision-making, and reflection while integrating memory, tools, and the environment as capability-enhancing components. Beyond the deployment risks associated with the LLM core, we introduce the security issues arising from these three additional modules. Specifically, for tools (Section 6.2.2) and memory (Section 6.2.3), we summarize existing work from both attack (Section 6.2.4) and defense (Section 6.2.5) perspectives to identify technical paradigms. For the environment (Section 6.2.6), we explore unique security challenges from the perspective of various agent-interaction settings. We demonstrate an overview of agent safety in Figure 12." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 486, + 159, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 486, + 159, + 498 + ], + "spans": [ + { + "bbox": [ + 45, + 486, + 159, + 498 + ], + "type": "text", + "content": "6.2.1 Definition of Agent" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 503, + 302, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 503, + 302, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 503, + 302, + 748 + ], + "type": "text", + "content": "LLM-driven agent refers to an AI system capable of operating independently or with limited human oversight, where a sophisticated language model [6, 783, 784, 785] serves as the foundational intelligence for processing inputs, executing tasks, and engaging in interactions. By leveraging advanced natural language understanding and generation, such agents [29, 786, 787, 788, 789] can analyze information, resolve queries, and adapt to user or environmental inputs [790, 791, 792]. To extend their functionality, they frequently incorporate supplementary mechanisms—such as data storage modules [23, 793, 794, 795], external software interfaces [790, 796, 797], or strategic reasoning frameworks [798]—allowing them to transcend basic text production. This adaptability makes them valuable for diverse implementations, including interactive dialogue systems [799], workflow optimization [800, 801, 802, 803], and complex decision-making scenarios [804]. In this study, we focus on deconstructing agent safety into three critical dimensions: tool utilization, memory management, and environment-specific security concerns. We demonstrate the components and structures of agent systems in Figure 11." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 42, + 391, + 54 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 42, + 391, + 54 + ], + "spans": [ + { + "bbox": [ + 309, + 42, + 391, + 54 + ], + "type": "text", + "content": "6.2.2 Tool Safety" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 308, + 60, + 566, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 60, + 566, + 281 + ], + "spans": [ + { + "bbox": [ + 308, + 60, + 566, + 281 + ], + "type": "text", + "content": "Some works enable LLM agents to learn how to use tools by generating datasets and fine-tuning the model for API usage [25, 805]. Specifically, tools can be implemented in various forms, including but not limited to code-based API functions (e.g., search engine [806] and calculator), embodied intelligence like robotic arms [807], and more. A tool serves as a bidirectional medium: on one hand, it allows the agent to map internal decisions into actions within the interactive environment; on the other hand, it also acts as a means for the agent to collect information from the external world. Given the pivotal role of tools in agent components, the related security issues are worth exploring [74]. For example, in the field of web security, Fang et al. [808, 809] investigate how autonomous agents, when equipped with appropriate tools, can independently compromise websites and exploit one-day vulnerabilities in real-world systems without human intervention. Next, we will summarize and discuss existing research from attack perspectives and figure out the lack of tool invocation defense in current research." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 281, + 566, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 281, + 566, + 455 + ], + "spans": [ + { + "bbox": [ + 308, + 281, + 566, + 455 + ], + "type": "text", + "content": "Attacks. 
Based on the target of the attack, safety-related attacks involving tools can be categorized into Tool-aided Attacks and Tool-targeted Attacks. The former refers to attackers utilizing agents equipped with tools to execute attacks that LLMs cannot independently assist with, such as leveraging agents with web access and code execution capabilities to facilitate cyberattacks. The latter involves attackers targeting the tool invocation process itself, attempting to manipulate or induce tool selection for malicious purposes through various attack methods. However, from the perspective of the technical stack of attacks, the two can be unified. We have identified new applications of traditional LLM attack methods in tool safety, as well as novel attack paradigms that have emerged due to the unique characteristics of tools." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 456, + 566, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 456, + 566, + 595 + ], + "spans": [ + { + "bbox": [ + 308, + 456, + 566, + 595 + ], + "type": "text", + "content": "Jailbreak. Similar to jailbreak methods in LLM safety, agent jailbreak also bypasses the agent's built-in safety mechanisms through specific prompts to elicit malicious responses. However, in the agent scenario, the malicious behaviors it aims to induce are different. Specifically, Cheng et al. [810] manually craft jailbreak prompts to extract personal information from the training data of code-generation agents. In contrast, Fu et al. [811] and Imprompter [812] both employ gradient-based optimization like GCG [260] to automatically generate input prompts or images that manipulate agents into leveraging tools for privacy breaches in dialogues or executing harmful actions on user resources." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 596, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 596, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 308, + 596, + 566, + 748 + ], + "type": "text", + "content": "Injection. This type of attack can be summarized into two forms of injection: Prompt Injection (similar to LLM safety vulnerabilities) where malicious instructions are embedded in input data, exploiting the difficulty LLMs face in distinguishing between instructions and data. Another form is Tool Injection where malicious tools are injected to enable further exploitation, such as using the tool to execute malicious actions. For example, BreakingAgents [813] utilizes human-crafted prompt injections to execute malfunction attacks, causing agents to engage in repetitive or irrelevant actions, with additional exploration into the propagation of such attacks within Multi-Agent Systems (MAS). ToolCommander [814] is the second type. It proposes a two-stage" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 46, + 41, + 565, + 278 + ], + "blocks": [ + { + "bbox": [ + 46, + 41, + 565, + 278 + ], + "lines": [ + { + "bbox": [ + 46, + 41, + 565, + 278 + ], + "spans": [ + { + "bbox": [ + 46, + 41, + 565, + 278 + ], + "type": "image", + "image_path": "cf762587d56c382c4c037d7bf5aac6c071b7d0e9976abb0ab003388647d5eb60.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 145, + 278, + 463, + 291 + ], + "lines": [ + { + "bbox": [ + 145, + 278, + 463, + 291 + ], + "spans": [ + { + "bbox": [ + 145, + 278, + 463, + 291 + ], + "type": "text", + "content": "Fig. 11: The overview of LLM-based single-agent and multi-agent systems." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 311, + 300, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 311, + 300, + 357 + ], + "spans": [ + { + "bbox": [ + 44, + 311, + 300, + 357 + ], + "type": "text", + "content": "attack strategy: first, injecting malicious tools to steal user queries, and subsequently manipulating tool selection using the stolen data, thereby achieving privacy theft and denial-of-service attacks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 360, + 300, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 360, + 300, + 533 + ], + "spans": [ + { + "bbox": [ + 44, + 360, + 300, + 533 + ], + "type": "text", + "content": "Backdoor. 
Backdoor attacks also find utility in the context of agent safety, but unlike LLMs, LLM agents develop diverse verbal reasoning traces through continuous environmental interactions, broadening potential backdoor attack vectors. Yang et al. [815] define two types of backdoor attacks, targeting either the final returned results or the intermediate processes of the attacking agent, and implement the above variations of agent backdoor attacks on two typical agent tasks, including web shopping and tool utilization. Furthermore, DemonAgent [816] decomposes a backdoor into multiple sub-backdoor fragments to poison the agent's tools. Beyond intentional guidance, studies such as BadAgent [817] highlight that backdoor attacks can inadvertently prompt agents to misuse tools for malicious purposes." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 536, + 299, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 536, + 299, + 651 + ], + "spans": [ + { + "bbox": [ + 44, + 536, + 299, + 651 + ], + "type": "text", + "content": "Manipulation. This type of attack refers to directly or indirectly manipulating or altering the tool's returned content to leak sensitive information or carry out malicious actions. AUTOCMD [818] employs a separate LLM, trained on tool-calling datasets and fine-tuned with target-specific examples, to generate and replicate legitimate commands for extracting sensitive information from tools. Meanwhile, Zhao et al. [819] manipulate third-party API outputs by injecting malicious content or omitting critical information, ultimately causing erroneous or biased system behaviors." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 654, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 654, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 654, + 301, + 748 + ], + "type": "text", + "content": "Defenses. 
Compared to attacks on agent tools, defense mechanisms for secure tool invocation have been less studied. Specifically, AgentGuard [820] employs LLM orchestrators to automatically detect unsafe tool-use workflows and produce safety constraints for secure tool utilization. PrivacyAsst [821] proposes an encryption-based solution by integrating an encryption scheme into the tool using LLM agents to safeguard user privacy and align them" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 311, + 564, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 311, + 564, + 370 + ], + "spans": [ + { + "bbox": [ + 308, + 311, + 564, + 370 + ], + "type": "text", + "content": "with computational security standards. In addition, some works enhance the security of agent systems by leveraging tool invocation, GuardAgent [822] pioneers an approach to verify target agents' trustworthiness by executing guardrail code through API calls during task plan implementation." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 381, + 408, + 393 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 381, + 408, + 393 + ], + "spans": [ + { + "bbox": [ + 309, + 381, + 408, + 393 + ], + "type": "text", + "content": "6.2.3 Memory Safety" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 396, + 564, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 396, + 564, + 535 + ], + "spans": [ + { + "bbox": [ + 307, + 396, + 564, + 535 + ], + "type": "text", + "content": "The memory mechanism in LLM agents enables them to retain historical behaviors, thereby enhancing future decision-making capabilities. Typically, agent memory can be categorized into long-term and short-term memory systems. 
The long-term memory module commonly employs Retrieval-Augmented Generation (RAG) [823, 824] technology to facilitate precise information retrieval, while the short-term memory stores real-time data to support immediate conversational contexts and task execution. While these memory modules significantly improve agent functionality, they simultaneously introduce potential security vulnerabilities, making the system susceptible to malicious attacks." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 309, + 546, + 370, + 557 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 546, + 370, + 557 + ], + "spans": [ + { + "bbox": [ + 309, + 546, + 370, + 557 + ], + "type": "text", + "content": "6.2.4 Attack" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 561, + 564, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 561, + 564, + 596 + ], + "spans": [ + { + "bbox": [ + 308, + 561, + 564, + 596 + ], + "type": "text", + "content": "Follow the trustworthy issues in [74], we categorize attacks related to memory into three types: Memory Poisoning, Privacy Leakage, and Memory Misuse." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 596, + 564, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 596, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 307, + 596, + 564, + 746 + ], + "type": "text", + "content": "(I) Memory Poisoning refers to adversarial attacks where malicious data is injected into an agent's long-term memory [313, 825, 826, 827, 828, 829]. When the agent retrieves and utilizes such corrupted memory, it may produce erroneous outputs, misleading responses, or even hazardous actions. For example, PoisonedRAG framework [827] employs a dual optimization approach, simultaneously manipulating both the retrieval and generation pipelines to systematically poison the agent's memory system. 
AgentPoison [826] introduces an advanced backdoor attack methodology that optimizes trigger patterns and seamlessly integrates them into query formulations, significantly elevating the likelihood of malicious sample retrieval" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 48, + 555, + 294 + ], + "blocks": [ + { + "bbox": [ + 53, + 48, + 555, + 294 + ], + "lines": [ + { + "bbox": [ + 53, + 48, + 555, + 294 + ], + "spans": [ + { + "bbox": [ + 53, + 48, + 555, + 294 + ], + "type": "image", + "image_path": "a2b149c02628f0cb46be90a88c408bf263d347f8b42ba68b6b83ded7364f1a70.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 167, + 299, + 442, + 312 + ], + "lines": [ + { + "bbox": [ + 167, + 299, + 442, + 312 + ], + "spans": [ + { + "bbox": [ + 167, + 299, + 442, + 312 + ], + "type": "text", + "content": "Fig. 12: The overview of the safety of LLM-based agent systems." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 332, + 301, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 332, + 301, + 460 + ], + "spans": [ + { + "bbox": [ + 44, + 332, + 301, + 460 + ], + "type": "text", + "content": "while maintaining stealth. (II) Privacy Leakage occurs when attackers exploit the interface between an agent and its long-term memory to extract stored sensitive data [520, 605, 607, 830, 831]. Such breaches may expose user information to malicious third parties, posing significant real-world risks. (II) Memory Misuse refers to the deliberate construction of multi-turn query sequences that systematically circumvent safety protocols by exploiting the retention properties of agent short-term memory [752, 832, 833, 834, 835, 836]. This attack vector enables progressive erosion of defensive measures through iterative interaction patterns." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 468, + 115, + 479 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 468, + 115, + 479 + ], + "spans": [ + { + "bbox": [ + 45, + 468, + 115, + 479 + ], + "type": "text", + "content": "6.2.5 Defense" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 481, + 300, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 481, + 300, + 609 + ], + "spans": [ + { + "bbox": [ + 44, + 481, + 300, + 609 + ], + "type": "text", + "content": "To counter these attacks, various defense approaches have been developed to enhance the robustness of memory systems [520, 835, 837, 838, 839]. (I) Detection Detection mechanisms primarily focus on identifying and eliminating malicious content retrieved from long-term memory systems [835, 838, 839?]. (II) Prompt Modification involves strategically rewriting user queries before processing by the agent to enhance response safety [520, 835]. 
(III) Output Intervention involves real-time monitoring and modification of agent responses prior to delivery to ensure safety and accuracy [825, 840]." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 617, + 163, + 629 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 617, + 163, + 629 + ], + "spans": [ + { + "bbox": [ + 45, + 617, + 163, + 629 + ], + "type": "text", + "content": "6.2.6 Environment Safety" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 631, + 300, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 631, + 300, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 631, + 300, + 746 + ], + "type": "text", + "content": "Agents operate within dynamic and heterogeneous environments, spanning physical and digital domains [841, 842, 843]. Their interaction with these environments is a multistep process [844, 845]. First, agents engage in perception, gathering data from sources like sensors in a physical setup or digital platforms [806]. This perceived data is then analyzed using various algorithms and reasoning mechanisms to identify patterns and potential actions [846]. Based on this analysis, agents take action, which can either directly influence the environment, like an autonomous vehicle making" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 308, + 332, + 564, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 332, + 564, + 357 + ], + "spans": [ + { + "bbox": [ + 308, + 332, + 564, + 357 + ], + "type": "text", + "content": "a lane change [847], or modify their own internal state, such as a software agent updating its knowledge base [848]." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 358, + 564, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 358, + 564, + 498 + ], + "spans": [ + { + "bbox": [ + 307, + 358, + 564, + 498 + ], + "type": "text", + "content": "However, this interaction is plagued by trustworthiness challenges. There are security risks in every process of interaction with the environment [849]. Agent roles and environmental constraints contribute to risks such as autonomous driving errors [850] and network disruptions [806, 851]. Given the diverse dynamic scenarios and related issues [849, 852, 853], the existing solutions are fragmented and lack a systematic framework. Thus, we will explore trustworthiness and security aspects by categorizing relevant papers according to whether they focus on ensuring safety in the perception, analysis, or action phase of the agent-environment interaction, as illustrated in Figure 10." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 500, + 564, + 697 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 500, + 564, + 697 + ], + "spans": [ + { + "bbox": [ + 307, + 500, + 564, + 697 + ], + "type": "text", + "content": "Perception. The perception phase serves as the foundational layer of agent-environment interaction, where agents acquire raw data to interpret their surroundings. However, this phase is inherently vulnerable to risks such as data poisoning, environmental noise, and biased observations. Hudson [841] converts real-time sensory inputs into natural language representations augmented with security validation protocols, employing causal analysis techniques to improve reliability during adversarial perception scenarios. ChatScene [847] develops safety-oriented simulation environments for autonomous systems by converting linguistic commands into executable code compatible with CARLA's simulation architecture. Chen et al. 
[854] systematically categorize perceptual vulnerabilities in financial AI systems, identifying three primary risk categories: synthetic data generation errors, temporal inconsistency challenges, and susceptibility to engineered input manipulations." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 700, + 565, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 700, + 565, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 700, + 565, + 746 + ], + "type": "text", + "content": "Reasoning. The reasoning phase transforms raw perceptual data into actionable insights through decision-making models, and knowledge-based inference. This stage is critical to ensure agents act appropriately in dynamic environments," + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 45, + 299, + 162 + ], + "blocks": [ + { + "bbox": [ + 47, + 45, + 299, + 162 + ], + "lines": [ + { + "bbox": [ + 47, + 45, + 299, + 162 + ], + "spans": [ + { + "bbox": [ + 47, + 45, + 299, + 162 + ], + "type": "image", + "image_path": "9deb0bb23bd9a7df4575cdd26e5b8aac051c2140a99729831eb7c59ed428f59b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 69, + 168, + 277, + 192 + ], + "lines": [ + { + "bbox": [ + 69, + 168, + 277, + 192 + ], + "spans": [ + { + "bbox": [ + 69, + 168, + 277, + 192 + ], + "type": "text", + "content": "Fig. 13: The overview of agent and environment interactions." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 214, + 301, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 214, + 301, + 376 + ], + "spans": [ + { + "bbox": [ + 44, + 214, + 301, + 376 + ], + "type": "text", + "content": "but introduces unique trustworthiness challenges. Yang et al. [846] develop a temporal safety verification framework using formal logic systems, implementing dual mechanisms for auditing the compliance of safety protocols and filtration of hazardous decisions to meet the requirements of industrial robotics. Agents4PLC [855] establishes an industrial control programming framework that combines automated code synthesis with formal verification processes, integrating RAG [235] and COT [343] to ensure operational integrity. Xiang et al. 
[822] propose medical AI systems that employ semantic reasoning engines for confidential data protection. Park et al. [845] demonstrate improved threat detection capabilities through simulated organizational communication patterns in anomaly identification systems." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 377, + 301, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 377, + 301, + 574 + ], + "spans": [ + { + "bbox": [ + 44, + 377, + 301, + 574 + ], + "type": "text", + "content": "Action. The action phase represents the culmination of agent-environment interaction, where agents execute decisions to influence their surroundings or update internal states. Trustworthiness at this stage hinges on ensuring that actions are safe, precise, and aligned with intended objectives. Fang et al. [851] reveal the capacity of autonomous systems to exploit digital infrastructure weaknesses through adaptive penetration testing, prompting the development of specialized evaluation frameworks for web agents. Furthermore, researchers develop frameworks to evaluate the truthfulness of web agents. Polaris [856] implements distributed AI architectures to enhance fault tolerance and response accuracy of healthcare interaction systems. La et al. [857] employ linguistic evolution models to simulate adaptive content generation patterns that circumvent automated moderation systems, providing insights for regulatory mechanism improvements." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 591, + 153, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 591, + 153, + 604 + ], + "spans": [ + { + "bbox": [ + 45, + 591, + 153, + 604 + ], + "type": "text", + "content": "6.3 Multi-agent Safety" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 608, + 300, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 608, + 300, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 608, + 300, + 746 + ], + "type": "text", + "content": "In the previous section, we explored security issues in a single agent setting and this section expands the discussion to multi-agent systems (MAS) [58, 71, 858, 859, 860, 861]. Since a single agent has limited problem-solving capabilities and a relatively narrow perspective, it struggles to conduct a comprehensive analysis of complex problems. In contrast, in MAS, agents can interact through various mechanisms, such as cooperation, competition, and debate, enabling them to solve complex problems more efficiently and effectively [862]. However, these interactions also introduce more complex and diverse security challenges [863]. Consequently, compared to single-agent systems, MASs face more severe" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 42, + 564, + 78 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 564, + 78 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 564, + 78 + ], + "type": "text", + "content": "and intricate security risks [864]. Similarly, we summarize and discuss existing research from both attack and defense perspectives." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 86, + 370, + 97 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 86, + 370, + 97 + ], + "spans": [ + { + "bbox": [ + 309, + 86, + 370, + 97 + ], + "type": "text", + "content": "6.3.1 Attack" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 100, + 565, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 100, + 565, + 297 + ], + "spans": [ + { + "bbox": [ + 307, + 100, + 565, + 297 + ], + "type": "text", + "content": "In MAS, security threats primarily stem from the propagation of harmful information, hallucinations, and biases through agent interactions, as well as the coordinated planning and optimization of attacks to target security agents within the system. These threats can arise spontaneously through the unintended amplification of misinformation or be deliberately orchestrated by malicious agents. Attack strategies in MAS often integrate multiple traditional techniques, such as prompt injection, jailbreak, and adversarial attacks, while also exploiting emergent properties of agent communication and collaboration. This multi-faceted nature makes MAS attacks more covert, adaptive, and challenging to detect and mitigate. Moreover, the dynamic and autonomous nature of agents allows adversaries to refine their attacks in real-time, further complicating defense mechanisms. Below, we summarize the key research related to these threats." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 297, + 565, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 297, + 565, + 482 + ], + "spans": [ + { + "bbox": [ + 308, + 297, + 565, + 482 + ], + "type": "text", + "content": "Transmissive Attack. It spreads within the MAS like a virus, propagating dangerous and harmful information, including covert malicious content, continuously attacking and compromising the agents in the system. 
Agent Smith [829] uses adversarial attack techniques, harmful images are generated—appearing benign on the surface but embedding malicious information. These images propagate within the MAS, causing agents to be compromised and posing significant security risks. CORBA [865] introduces Contagious Recursive Blocking Attacks, which exhibit transmissibility across any topological network and can continuously drain computational resources. Lee et al. [600] introduce Prompt Infection in MAS, including data theft, scams, misinformation, and system-wide disruption, which spreads silently. Similarly, Tan et al. [866] use multimodal malicious prompts to infect other secure agents, compromising their security." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 482, + 565, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 482, + 565, + 665 + ], + "spans": [ + { + "bbox": [ + 308, + 482, + 565, + 665 + ], + "type": "text", + "content": "Interference Attack. This attack focuses on how it interferes with and disrupts interactions within the MAS, emphasizing communication disruption and misinformation, which affect information transmission within the MAS and lead to a decline in its defensive capability. NetSafe [867] conducts extensive experiments, analyzing and revealing their structural dependencies and adversarial impacts. At the same time, Huang et al. [868] study how the resilience of MAS varies between different downstream tasks, system structures, and error types; Agent-in-the-Middle [869] manipulates and intercepts information in agent interactions through intermediary agents, disrupting the communication mechanism. The experiment validates the harm caused by the interruption of interactions by intermediary agents through a comparison of MAS with different topological structures." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 666, + 564, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 666, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 666, + 564, + 746 + ], + "type": "text", + "content": "Strategic Attack. Strategic attack involves collaboration between agents and strategic optimization of attack methods, aiming to emphasize the cooperation and long-term impact of the attack, making it increasingly dangerous and more destructive. Evil Geniuses [870] modifies system roles, where these roles collaborate to generate malicious prompts. By simulating adversarial attacks and defenses," + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 170 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 170 + ], + "type": "text", + "content": "they optimize and evaluate each round of attack behavior, making the attacks increasingly dangerous to target other agents. Amayuelas et al. [871] use adversarial attack techniques to enable harmful agents in the multi-agent system to collaborate in debates to persuade other secure agents. 
These malicious agents may exploit superior knowledge, larger model sizes, or greater persuasion power to gain an unfair advantage. Ju et al. [872] form a multi-agent community using a two-stage attack method: persuasive injection and knowledge manipulation injection, to induce agents to spread counterfactual and harmful knowledge." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 178, + 115, + 190 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 178, + 115, + 190 + ], + "spans": [ + { + "bbox": [ + 45, + 178, + 115, + 190 + ], + "type": "text", + "content": "6.3.2 Defense" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 193, + 300, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 193, + 300, + 307 + ], + "spans": [ + { + "bbox": [ + 44, + 193, + 300, + 307 + ], + "type": "text", + "content": "In response to the various attack methods mentioned above in multi-agent systems, many effective defense strategies have emerged that can be applied to MAS. Currently, many studies focus on forming agent groups to collaborate in joint defense and designing specific defense mechanisms, such as multi-round or multi-layer checks and filtering, to ensure the safety of the responses output by the MAS. Alternatively, defense can be achieved by identifying harmful agents through the propagation of malicious information and eliminating malicious sources." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 308, + 301, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 308, + 301, + 479 + ], + "spans": [ + { + "bbox": [ + 44, + 308, + 301, + 479 + ], + "type": "text", + "content": "Adversarial Defense. This type of defense focuses on attack-defense confrontation, leveraging this adversarial mechanism to develop more effective defense methods or mechanisms to enhance the security of the MAS. 
LLAMOS [873] employs adversarial defense techniques, where defensive agents and attacking agents engage in counterinteractions, with neither fully defeating the other, thereby enhancing the robustness of the defense and improving the MAS's overall defensive capability. AutoDefense [874] proposes that agents collaborate to complete defense tasks through adversarial prompt filtering, primarily focusing on filtering harmful prompt information from LLMs. In addition to using adversarial techniques for defense, defense can also be achieved by forming a multi-agent group to engage in debates." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 481, + 301, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 481, + 301, + 700 + ], + "spans": [ + { + "bbox": [ + 44, + 481, + 301, + 700 + ], + "type": "text", + "content": "Consensus Defense. To better leverage the advantages of MAS, Consensus Defense utilizes agent collaboration and consensus building for defense, employing voting, debates, and evidence-based reasoning mechanisms to establish a defense system and enhance the security of the MAS. Chern et al. [875] propose that toxicity can be reduced through multi-agent debates, and the widespread use of multi-agent interactions can lead to marginal improvements. Similarly, BlockAgent [876] proposes a Proof-of-Thought consensus mechanism that combines stake-based miner designation with multi-round debate-style voting, enabling BlockAgents to facilitate multi-agent collaboration through a structured workflow. Audit-LLM [877] proposes a pair-wise Evidence-based Multi-agent Debate mechanism, designed to defend against hallucinations by forming a MAS to detect internal threats. This approach is divided into three components: task decomposition, tool construction, and the final execution of the MAS, ultimately reaching consensus through reasoning." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 700, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 700, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 700, + 301, + 748 + ], + "type": "text", + "content": "Structural Defense. Structural Defense treats the MAS as a network structure for planning defense methods, using graph analysis techniques to detect anomalies and resist attacks while incorporating knowledge from other domains" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 42, + 566, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 566, + 111 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 566, + 111 + ], + "type": "text", + "content": "to enrich defense strategies in MAS. G-Safeguard [878] compares agents in MAS with various topological structures to nodes in a graph, using Graph Neural Networks (GNN) [879, 880] to detect anomalies in the agents' dialogue graphs and counter adversarial attacks and misinformation within the MAS." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 122, + 468, + 134 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 122, + 468, + 134 + ], + "spans": [ + { + "bbox": [ + 309, + 122, + 468, + 134 + ], + "type": "text", + "content": "6.4 Agent Communication Safety" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 137, + 566, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 137, + 566, + 415 + ], + "spans": [ + { + "bbox": [ + 307, + 137, + 566, + 415 + ], + "type": "text", + "content": "As Large Language Model (LLM)-based Agents evolve from isolated entities into interconnected MAS, the mechanisms governing communication between Agents, and their interactions with external environments and tools, have become increasingly critical. 
Agents exchange information and collaborate through message passing, tool invocation, and environmental interactions; these mechanisms, while essential to system functionality, also expose significant attack surfaces. Early methods [881, 882, 883, 884, 885, 886, 887] of Agent interaction often relied on ad-hoc approaches, such as shared memory [888], API calls [889] or unstructured function calls [890], leading to fragmented systems lacking unified security considerations. To address this challenge and enhance interoperability, standardized communication protocols have emerged. Examples include Anthropic's Model Context Protocol (MCP) [891] for Agent-tool interactions, Google's Agent2Agent (A2A) [892] for enterprise-level Agent collaboration, and the Agent Network Protocol (ANP) [893] for open network interoperability, along with other commonly used protocols [894, 895, 896, 897, 898, 899, 900, 901, 902, 903, 904]. However, the open design and dynamic nature of these communication mechanisms, coupled with the autonomy of the Agent, has exposed new vulnerabilities while enhancing functionality." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 309, + 421, + 370, + 432 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 421, + 370, + 432 + ], + "spans": [ + { + "bbox": [ + 309, + 421, + 370, + 432 + ], + "type": "text", + "content": "6.4.1 Attack" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 434, + 566, + 550 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 434, + 566, + 550 + ], + "spans": [ + { + "bbox": [ + 307, + 434, + 566, + 550 + ], + "type": "text", + "content": "The interconnected nature of MAS, facilitated by numerous communication channels, creates a multifaceted attack surface. While individual Large Language Models (LLMs) possess inherent vulnerabilities, the interactions and communications among Agents introduce novel threats that exploit the system's collaborative dynamics. 
These threats target various components, including communication channels, content interpretation, and underlying protocols, with examples such as Shadowing Attacks, Naming Attacks, Context Poisoning, and Rug Pulls." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 550, + 566, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 550, + 566, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 550, + 566, + 746 + ], + "type": "text", + "content": "Attacks Communication Channels. These attacks directly disrupt the transmission and routing of messages in the system, affecting both inter-Agent communications and interactions with external endpoints. For instance, Agent-in-the-Middle (AiTM) attacks [869] specifically target the core communication mechanisms of LLM-MAS. By intercepting and manipulating messages between Agents, these attacks can cause Agents to perform unintended actions, thereby compromising the entire system. Such attacks underscore the critical security vulnerabilities arising from the communication-dependent nature of Agent collaboration. Furthermore, attacks targeting communication channels and transmission processes, such as communication perturbation [905], involve adversaries injecting noise into messages in transit [906] or masquerading as legitimate sources [907], thereby compromising both the efficiency and security of Agent collaboration." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 168 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 168 + ], + "type": "text", + "content": "Attacks Content. These attacks target the content of messages themselves, leveraging the mechanisms by which Agents process and interpret received information. For example, Prompt Injection involves embedding malicious instructions into data or content that Agents retrieve or receive through communication channels, thereby manipulating the Agent's behavior or decision-making processes. This technique is discussed in several works, such as [600] and [543]. Additionally, [908] explores indirect Prompt Injection within tool-based scenarios, highlighting the varied strategies employed in complex environments." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 170, + 301, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 170, + 301, + 319 + ], + "spans": [ + { + "bbox": [ + 47, + 170, + 301, + 319 + ], + "type": "text", + "content": "Attacks Exploiting Multi-Agent Dynamics. These attacks leverage the interconnected structure, interaction patterns, or collective behavior of communication-driven Multi-Agent Systems (MAS) to amplify their impact or achieve strategic objectives. Contagious attacks (propagation) initiate malicious behavior on a single agent and spread it across the entire network via inter-agent communication [829, 865]. 
Additionally, malicious agents can coordinate through collective communication to achieve harmful goals, such as replicating malicious instructions across the network by sending replication code or commands, thereby leading to the sharing of legitimate communication keys or identity information with other malicious entities [909]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 329, + 115, + 339 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 329, + 115, + 339 + ], + "spans": [ + { + "bbox": [ + 45, + 329, + 115, + 339 + ], + "type": "text", + "content": "6.4.2 Defense" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 342, + 300, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 342, + 300, + 446 + ], + "spans": [ + { + "bbox": [ + 44, + 342, + 300, + 446 + ], + "type": "text", + "content": "To tackle threats to Agent communication, research proposes a multi-layered defense strategy addressing key points across the communication pipeline, from infrastructure to Agent-level processing. These defenses aim to prevent, detect, or mitigate attacks on channels, content, infrastructure, dynamics, and environmental factors. The strategies integrate into infrastructure and protocol design, individual Agents' message processing, and the collaborative and learning mechanisms of the MAS." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 447, + 301, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 447, + 301, + 631 + ], + "spans": [ + { + "bbox": [ + 44, + 447, + 301, + 631 + ], + "type": "text", + "content": "Protocol Defenses. Protecting the foundation of Agent communication. 
This includes adopting standardized protocols with built-in security features (encryption, integrity checks, authentication) To counter Agent communication threats, research proposes multi-layered defense strategies targeting different points in the communication pipeline, from the underlying infrastructure to Agent-level message processing. Effective defenses aim to prevent, detect, or mitigate attacks on communication channels, content, infrastructure, such as MCP [891], A2A [892], ANP [893] standards. Establishing managed registries and identity systems for Agent and Tool/Service registration and identity management. Enforcing strong Agent identity verification and access control policies, including JIT credential provisioning. Implementing mechanisms to enforce communication dynamics, and environmental impacts." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 632, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 632, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 632, + 301, + 748 + ], + "type": "text", + "content": "Content Defense. These defenses operate at the agent level, focusing on how agents process received messages and content. This includes input modification and filtering, which preprocess incoming content to neutralize adversarial elements. Agents also employ active defense mechanisms, such as reliability estimation, to assess the trustworthiness of messages based on local context, thereby mitigating the impact of untrusted information. 
For example, [910] proposed an active defense strategy that utilizes a reliability estimator to judge the credibility of received messages and" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 42, + 565, + 77 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 565, + 77 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 565, + 77 + ], + "type": "text", + "content": "employs a decomposable message aggregation policy network to reduce the influence of unreliable messages on the final decision." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 95, + 444, + 107 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 95, + 444, + 107 + ], + "spans": [ + { + "bbox": [ + 309, + 95, + 444, + 107 + ], + "type": "text", + "content": "6.5 Agent Safety Evaluation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 110, + 565, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 110, + 565, + 216 + ], + "spans": [ + { + "bbox": [ + 307, + 110, + 565, + 216 + ], + "type": "text", + "content": "Currently, there is already a substantial body of work evaluating the performance of LLM-based agent systems on different tasks [911, 912, 913, 914, 915]. In this section, we focus on benchmarks designed to assess the security of agents. Broadly speaking, these benchmarks include those that construct datasets and those that use other agents to set up sandbox environments for evaluation, each with distinct assessment priorities and specific scenarios for agent security [314, 916, 917, 918, 919]." + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 310, + 240, + 565, + 404 + ], + "blocks": [ + { + "bbox": [ + 351, + 225, + 523, + 237 + ], + "lines": [ + { + "bbox": [ + 351, + 225, + 523, + 237 + ], + "spans": [ + { + "bbox": [ + 351, + 225, + 523, + 237 + ], + "type": "text", + "content": "TABLE 10: Benchmarks for agent safety." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 240, + 565, + 404 + ], + "lines": [ + { + "bbox": [ + 310, + 240, + 565, + 404 + ], + "spans": [ + { + "bbox": [ + 310, + 240, + 565, + 404 + ], + "type": "table", + "html": "
BenchmarkDynamicLLM asEvaluatorEvaluation Focus
InjectAgent [920]Prompt Injection
AgentDojo [849]Prompt Injection
AgentBackdoorEval [816]Backdoor
RiskAwareBench [921]Embodied Agent
RedCode [916]Coding Agent
S-Eval [917]General
Bells [918]General
AgentSafetyBench [922]General
AgentSecurityBench [?]General
AgentHarm [923]General
R-Judge [314]General
ToolSowrd [924]Tool
PrivacyLens [919]Privacy
ToolEmu [925]Tool
HAIEcosystem [926]General
SafeAgentBench [927]General
JailJudge [928]Jailbreak
", + "image_path": "2fa9a8de989beb0f98e2c807f841f4935386a53ffedb159b40fad075d49e0a82.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 426, + 463, + 437 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 426, + 463, + 437 + ], + "spans": [ + { + "bbox": [ + 309, + 426, + 463, + 437 + ], + "type": "text", + "content": "6.5.1 Attack-Specific Benchmarks" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 441, + 565, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 441, + 565, + 685 + ], + "spans": [ + { + "bbox": [ + 307, + 441, + 565, + 685 + ], + "type": "text", + "content": "This type of benchmark focuses on testing the security of an agent when facing specific types of attacks, such as Prompt Injection [600, 929], Backdoor [817, 930, 931], and Jailbreak [874, 932]. Specifically, InjectAgent [920] evaluates LLM agents' vulnerability to indirect prompt injection attacks, measuring behavior safety when tool-integrated agents process malicious instructions embedded in external content, with hacking prompts as an enhancement. A similar work is AgentDojo [849], a dynamic, extensible evaluation framework for assessing prompt injection attacks and defenses in LLM agents by simulating realistic tasks (e.g., email management, banking) with stateful environments and multi-tool interactions under adversarial conditions. As for backdoor attacks, AgentBackdoorEval [816] includes five real-world domains (including Banking-Finance, Medical, and Social Media) with automatically generated prompts, simulated tools, and tailored backdoor triggers to assess attack stealth and effectiveness. Besides, JailJudge [928] introduces a comprehensive jailbreak evaluation benchmark featuring a voting JailJudge MultiAgent, a comprehensive JailJudgeTrain dataset, and a trained Jailjudge Guard." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 309, + 696, + 468, + 708 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 696, + 468, + 708 + ], + "spans": [ + { + "bbox": [ + 309, + 696, + 468, + 708 + ], + "type": "text", + "content": "6.5.2 Module-Specific Benchmarks" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 712, + 565, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 712, + 565, + 748 + ], + "spans": [ + { + "bbox": [ + 308, + 712, + 565, + 748 + ], + "type": "text", + "content": "Currently, these benchmarks for evaluating the security of a specific module in an agent focus on the invocation of tools [933, 934, 935, 936]. For example, ToolSowrd [924] evaluates" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 125 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 125 + ], + "type": "text", + "content": "LLM safety in tool learning across three stages (input, execution, output) by designing six adversarial scenarios (e.g., malicious queries, noisy tool misdirection, harmful feedback). 
ToolEmu [925] employs an LM-emulated sandbox to simulate diverse high-stakes tool executions and scenarios, leveraging GPT-4 for both tool emulation and automatic safety/helpfulness evaluations." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 136, + 169, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 136, + 169, + 148 + ], + "spans": [ + { + "bbox": [ + 45, + 136, + 169, + 148 + ], + "type": "text", + "content": "6.5.3 General Benchmarks" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 152, + 301, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 152, + 301, + 510 + ], + "spans": [ + { + "bbox": [ + 44, + 152, + 301, + 510 + ], + "type": "text", + "content": "In addition to the previously mentioned benchmarks that focus on a specific aspect of agent security, some efforts have developed more comprehensive and holistic evaluation frameworks, taking into account diverse scenarios, different agents, and various offensive and defensive techniques. For instance, AgentSafetyBench [922] assesses LLM agent safety through 2,000 test cases across 349 interactive environments, covering 8 risk categories (e.g., data leaks, physical harm) and 10 failure modes (e.g., incorrect tool calls, risk unawareness), with automated scoring via a fine-tuned model. Similarly, AgentSecurityBench [?] is a comprehensive framework that formalizes and evaluates attacks (e.g., Direct/Indirect Prompt Injection, Memory Poisoning) and defenses across 10 scenarios, 10 agents, and 13 LLM backbones, using 7 evaluation metrics. SafeAgentBench [927] evaluates embodied LLM agents' safety awareness with 750 diverse tasks (detailed, abstract, long-horizon) in SafeAgentEnv simulation environment, leveraging GPT-4 for task generation and dual evaluators (execution-based and semantic). 
HAIEcosystem [926] evaluates safety through multi-turn interactions between human users (benign/malicious) and AI agents across 132 scenarios, using modular sandbox environment and LLM-based dynamic risk measurement. AgentHarm [923] tests agent robustness by evaluating compliance with 110 explicitly malicious multi-step tasks across 11 harm categories, using synthetic tools and fine-grained grading rubrics. Different form previous benchmarks, RiskAwareBench [921] focuses on embodied agents, evaluating physical risk awareness via four modules: safety tip generation, risky scene generation, plan generation, and automated evaluation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 522, + 195, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 522, + 195, + 534 + ], + "spans": [ + { + "bbox": [ + 45, + 522, + 195, + 534 + ], + "type": "text", + "content": "6.5.4 LLM Deployment Roadmap" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 538, + 300, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 538, + 300, + 583 + ], + "spans": [ + { + "bbox": [ + 44, + 538, + 300, + 583 + ], + "type": "text", + "content": "In the deployment of LLMs under frozen parameters, the security landscape has evolved through a tightly coupled dynamic among attacks, defenses, and evaluation mechanisms." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 584, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 584, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 584, + 301, + 746 + ], + "type": "text", + "content": "Initially, black-box attacks leveraged the generative capabilities of LLMs themselves to optimize adversarial prompts, often without precise alignment to the decision boundaries. 
In contrast, gradient-guided white-box methods offer greater control but face inherent limitations due to the discrete nature of token spaces resulting in prompts with weakened semantic fidelity. These attack trends have catalyzed the emergence of prompt-level defense strategies. To counter black-box attacks, recent defenses adopt prompt shaping and system-level constraints to guide and restrict the model's response behavior. For gradient-based attacks, defenses typically apply perplexity-based detection and semantic consistency checks to identify suspicious or adversarial outputs." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 42, + 564, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 564, + 169 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 564, + 169 + ], + "type": "text", + "content": "The growing sophistication of defenses reshaped the requirements for evaluation. Static, one-shot rejection mechanisms have proven insufficient in multi-task and multimodal deployments, prompting the development of dynamic strategies such as response rewriting, hierarchical permission control, and consensus-based filtering across multiple models. These strategies demand richer evaluation protocols beyond single metric assessments, shifting toward behavior metrics that capture cross-input consistency, risk under specific task conditions, and adaptability to strategy switching." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 308, + 169, + 564, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 169, + 564, + 286 + ], + "spans": [ + { + "bbox": [ + 308, + 169, + 564, + 286 + ], + "type": "text", + "content": "As the attack-defense interaction intensifies, the evaluation itself has become a critical driver of system evolution. 
Recent frameworks have introduced automated red teaming pipelines, enabling a closed-loop process where jailbreak samples are continually generated, tested against deployed defenses, and fed back to guide both adversarial strategies and defense refinement. This has laid the groundwork for a new paradigm in LLM security research: one where attack, defense, and evaluation are no longer treated in isolation but co-evolve as an interdependent, self-reinforcing system." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 309, + 293, + 468, + 305 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 293, + 468, + 305 + ], + "spans": [ + { + "bbox": [ + 309, + 293, + 468, + 305 + ], + "type": "text", + "content": "6.5.5 LLM Deployment Perspective" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 308, + 564, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 308, + 564, + 458 + ], + "spans": [ + { + "bbox": [ + 308, + 308, + 564, + 458 + ], + "type": "text", + "content": "(1) Attack strategies will become more structured and semantically aligned. (i) Black-box attacks may evolve through agent-based optimization, enabling sentence-level jailbreaks with clearer intent and higher success rates. (ii) To overcome the limitations of token-level gradient attacks, future work may focus on generating semantically consistent adversarial prompts that are less detectable by perplexity-based defenses. (iii) Open-source models will serve as surrogates for closed models, allowing attackers to replicate decision boundaries before launching white-box attacks. (iv) Variants from fine-tuning pipelines may leak private information through cross-model comparison, introducing version-aware privacy risks." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 458, + 564, + 654 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 308, + 458, + 564, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 458, + 564, + 548 + ], + "spans": [ + { + "bbox": [ + 308, + 458, + 564, + 548 + ], + "type": "text", + "content": "(2) Defenses will shift toward adaptive and transferable mechanisms. (i) Prompt-based defenses will evolve into context-aware controllers that adjust behavior based on input semantics and task context. (ii) Generalizable defenses that work across domains and languages will be critical for scalable deployment. (iii) Future systems may support online updates, enabling continuous refinement in response to new threats." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 308, + 550, + 564, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 550, + 564, + 654 + ], + "spans": [ + { + "bbox": [ + 308, + 550, + 564, + 654 + ], + "type": "text", + "content": "(3) Evaluation will act as both a diagnostic and driving force. (i) Benchmarks must expand beyond text to cover multimodal inputs and tool-based actions. (ii) Multi-objective evaluation will replace single-metric scoring, balancing safety and utility through trade-off analysis. (iii) Static test sets will give way to adaptive, streaming benchmarks that evolve with attack trends. (iv) Automated red teaming will close the loop, enabling real-time attack generation, evaluation, and defense adjustment." 
+ } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 309, + 663, + 411, + 675 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 663, + 411, + 675 + ], + "spans": [ + { + "bbox": [ + 309, + 663, + 411, + 675 + ], + "type": "text", + "content": "6.5.6 Agent Roadmap" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 677, + 565, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 677, + 565, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 677, + 565, + 748 + ], + "type": "text", + "content": "Agent. The evolution of LLM-based agents originated from role-playing paradigms [801, 937, 938, 939], where researchers investigated organizational structures, role allocation mechanisms, and implementation workflows for task-oriented agents in various social contexts. These systematic explorations not only demonstrated agents' potential in" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 168 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 168 + ], + "type": "text", + "content": "addressing human societal challenges but also spawned interdisciplinary research programs spanning sociology, organizational theory, and psychology. As the field advanced, research focus shifted toward automated agent workflows [795, 860, 940, 941], domain-specific methods for embodied intelligence, and the development of agent capabilities in tool utilization and memory management. Through this progression, agent systems have emerged as a transformative paradigm for automating human social processes, gaining significant recognition as a viable solution for complex societal automation." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 169, + 301, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 169, + 301, + 343 + ], + "spans": [ + { + "bbox": [ + 44, + 169, + 301, + 343 + ], + "type": "text", + "content": "The rapid advancement of agent capabilities and architectures has brought safety concerns to the forefront of academic and industrial research. These challenges span multiple critical dimensions: tool safety, memory security, and the agent's fundamental operational integrity. Inheriting both the capabilities and vulnerabilities of their underlying LLM foundations, agents intrinsically carry these \"genetic\" weaknesses into more complex operational environments. 
This inheritance makes safety vulnerabilities particularly acute in agent systems, especially when handling sensitive real-world applications involving personal privacy and financial assets. The development of agent technologies has thus become inextricably linked with safety considerations. Recent years (" + }, + { + "bbox": [ + 44, + 169, + 301, + 343 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 44, + 169, + 301, + 343 + ], + "type": "text", + "content": "2023- until now) have witnessed accelerated research in agent safety, focusing on four key frontiers:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 346, + 299, + 437 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 55, + 346, + 299, + 367 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 346, + 299, + 367 + ], + "spans": [ + { + "bbox": [ + 55, + 346, + 299, + 367 + ], + "type": "text", + "content": "- Agent Brain Security: The core decision-making mechanisms." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 369, + 299, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 369, + 299, + 392 + ], + "spans": [ + { + "bbox": [ + 55, + 369, + 299, + 392 + ], + "type": "text", + "content": "- Tool Invocation Safety: Secure external API and tool usage." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 392, + 299, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 392, + 299, + 415 + ], + "spans": [ + { + "bbox": [ + 55, + 392, + 299, + 415 + ], + "type": "text", + "content": "- Memory Retrieval Protection: Robustness against memory poisoning." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 415, + 299, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 415, + 299, + 437 + ], + "spans": [ + { + "bbox": [ + 55, + 415, + 299, + 437 + ], + "type": "text", + "content": "- Communication Protocol Security: Safe multi-agent interactions." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 44, + 441, + 301, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 441, + 301, + 475 + ], + "spans": [ + { + "bbox": [ + 44, + 441, + 301, + 475 + ], + "type": "text", + "content": "Emerging work has also begun addressing safety challenges in embodied agent scenarios, marking an important expansion of the research domain." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 488, + 129, + 500 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 488, + 129, + 500 + ], + "spans": [ + { + "bbox": [ + 45, + 488, + 129, + 500 + ], + "type": "text", + "content": "6.5.7 Perspective" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 44, + 503, + 299, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 503, + 299, + 526 + ], + "spans": [ + { + "bbox": [ + 44, + 503, + 299, + 526 + ], + "type": "text", + "content": "We outline potential future research directions for agent systems and analyze their developmental trajectory:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 44, + 527, + 301, + 748 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 44, + 527, + 301, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 527, + 301, + 643 + ], + "spans": [ + { + "bbox": [ + 44, + 527, + 301, + 643 + ], + "type": "text", + "content": "(1) Safety of External Agent Modules. 
Unlike standalone LLMs, agents interact with external modules (e.g., tools, memory), which are exposed to open environments and thus more vulnerable to attacks. Key research challenges include: (i) Tool Safety: Secure tool invocation and API usage to prevent adversarial exploitation. (ii) Memory Protection: Robustness against memory poisoning and unauthorized access, to name just a few. These external interfaces introduce unique attack surfaces, making their security a critical research priority." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 44, + 643, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 643, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 643, + 301, + 748 + ], + "type": "text", + "content": "(2) Stability and Reliability of Dynamically Updated Agents via Reinforcement Learning: As reinforcement learning (RL) [35, 942, 943] techniques become increasingly integrated with LLM-based agents, these systems are being deployed in more complex and dynamic environments. While this integration enhances agents' adaptability and intelligence, it also introduces significant risks: (i) Emergent Threats: Advanced RL capabilities may inadvertently enable agents to learn and propagate harmful behaviors or danger-" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 307, + 42, + 564, + 77 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 564, + 77 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 564, + 77 + ], + "type": "text", + "content": "ous information. (ii) Dynamic Vulnerability: Continuous online learning increases exposure to adversarial perturbations or reward hacking." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 77, + 564, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 77, + 564, + 169 + ], + "spans": [ + { + "bbox": [ + 307, + 77, + 564, + 169 + ], + "type": "text", + "content": "Critical Research Directions: (i) Safe RL Frameworks: Developing constrained optimization methods to bound agent behavior within ethical and operational guardrails. (ii) Stability-Aware Updates: Designing update protocols that balance adaptability with robustness (e.g., catastrophic forgetting mitigation). (iii) Anomaly Detection: Real-time monitoring of learning trajectories to identify and neutralize hazardous knowledge acquisition." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 169, + 564, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 169, + 564, + 227 + ], + "spans": [ + { + "bbox": [ + 308, + 169, + 564, + 227 + ], + "type": "text", + "content": "(3) Safety of Embodied Agents in Domain-Specific Scenarios: As autonomous agents become increasingly deployed across specialized domains, their safety considerations must account for unique domain-specific vulnerabilities. 
We list some key challenges as follows:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 319, + 229, + 564, + 300 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 319, + 229, + 387, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 229, + 387, + 240 + ], + "spans": [ + { + "bbox": [ + 319, + 229, + 387, + 240 + ], + "type": "text", + "content": "Web Agents:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 328, + 242, + 564, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 242, + 564, + 254 + ], + "spans": [ + { + "bbox": [ + 328, + 242, + 564, + 254 + ], + "type": "text", + "content": "- HTML/JS injection risks during automated browsing" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 328, + 254, + 564, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 254, + 564, + 276 + ], + "spans": [ + { + "bbox": [ + 328, + 254, + 564, + 276 + ], + "type": "text", + "content": "- Secure sandboxing requirements for DOM manipulation" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 328, + 277, + 564, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 277, + 564, + 300 + ], + "spans": [ + { + "bbox": [ + 328, + 277, + 564, + 300 + ], + "type": "text", + "content": "- Cross-site scripting (XSS) vulnerabilities in automated form-filling" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 320, + 302, + 437, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 302, + 437, + 313 + ], + "spans": [ + { + "bbox": [ + 320, + 302, + 437, + 313 + ], + "type": "text", + "content": "- Communication Agents:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 328, + 315, + 564, + 373 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 328, + 315, + 564, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": 
[ + 328, + 315, + 564, + 338 + ], + "spans": [ + { + "bbox": [ + 328, + 315, + 564, + 338 + ], + "type": "text", + "content": "- Protocol-level attacks (e.g., SIP flooding, WebRTC exploits)" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 328, + 338, + 564, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 338, + 564, + 361 + ], + "spans": [ + { + "bbox": [ + 328, + 338, + 564, + 361 + ], + "type": "text", + "content": "- End-to-end encryption requirements for sensitive dialogues" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 328, + 361, + 528, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 361, + 528, + 373 + ], + "spans": [ + { + "bbox": [ + 328, + 361, + 528, + 373 + ], + "type": "text", + "content": "- Authentication bypass in voice-based agents" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 320, + 376, + 441, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 376, + 441, + 387 + ], + "spans": [ + { + "bbox": [ + 320, + 376, + 441, + 387 + ], + "type": "text", + "content": "Robotics Control Agents:" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 328, + 388, + 548, + 423 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 328, + 388, + 548, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 388, + 548, + 399 + ], + "spans": [ + { + "bbox": [ + 328, + 388, + 548, + 399 + ], + "type": "text", + "content": "- Physical safety constraints in actuator commands" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 328, + 400, + 515, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 400, + 515, + 411 + ], + "spans": [ + { + "bbox": [ + 328, + 400, + 515, + 411 + ], + "type": "text", + "content": "Real-time collision avoidance verification" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 328, + 411, + 502, + 423 + ], + 
"type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 411, + 502, + 423 + ], + "spans": [ + { + "bbox": [ + 328, + 411, + 502, + 423 + ], + "type": "text", + "content": "- Emergency stop mechanism reliability" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 320, + 426, + 414, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 426, + 414, + 437 + ], + "spans": [ + { + "bbox": [ + 320, + 426, + 414, + 437 + ], + "type": "text", + "content": "Healthcare Agents:" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 328, + 438, + 515, + 462 + ], + "type": "list", + "angle": 0, + "index": 36, + "blocks": [ + { + "bbox": [ + 328, + 438, + 515, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 438, + 515, + 450 + ], + "spans": [ + { + "bbox": [ + 328, + 438, + 515, + 450 + ], + "type": "text", + "content": "Medical decision audit trail requirements" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 328, + 451, + 497, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 451, + 497, + 462 + ], + "spans": [ + { + "bbox": [ + 328, + 451, + 497, + 462 + ], + "type": "text", + "content": "- Drug interaction verification systems" + } + ] + } + ], + "index": 35 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 309, + 477, + 518, + 488 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 477, + 518, + 488 + ], + "spans": [ + { + "bbox": [ + 309, + 477, + 518, + 488 + ], + "type": "text", + "content": "7 SAFETY IN LLM-BASED APPLICATION" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 307, + 492, + 564, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 492, + 564, + 643 + ], + "spans": [ + { + "bbox": [ + 307, + 492, + 564, + 643 + ], + "type": "text", + "content": "In this section, we focus on the security considerations that should be addressed following the commercialization of 
LLMs into practical applications. With the rapid development of LLMs in fields such as content creation, intelligent interaction, automated programming, medical diagnosis, and financial analysis, LLM-based applications are reshaping industry workflows and business models [944]. However, while LLMs significantly enhance productivity and facilitate human-machine collaboration, their large-scale deployment has also introduced severe security challenges [66]. Ensuring the security, reliability, and compliance of LLM-based applications has become a critical issue in AI research and real-world implementation." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 307, + 643, + 564, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 643, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 307, + 643, + 564, + 746 + ], + "type": "text", + "content": "Truthfulness. Despite their powerful text generation capabilities, LLMs exhibit hallucination phenomena, generating inaccurate, misleading, or entirely fictitious content [945, 946, 947, 948, 949]. Unlike traditional errors, hallucinations are often subtle and linguistically plausible, making them especially dangerous in real-world applications. This challenge is exacerbated in high-stakes domains such as healthcare, law, and finance, where misleading AI-generated information can directly affect human safety and economic" + } + ] + } + ], + "index": 39 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 45, + 44, + 564, + 397 + ], + "blocks": [ + { + "bbox": [ + 45, + 44, + 564, + 397 + ], + "lines": [ + { + "bbox": [ + 45, + 44, + 564, + 397 + ], + "spans": [ + { + "bbox": [ + 45, + 44, + 564, + 397 + ], + "type": "image", + "image_path": "792d0e5c90e63607687a3b7c2093f939694dd3631a5e08614fac6eb7112e1843.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 51, + 403, + 557, + 450 + ], + "lines": [ + { + "bbox": [ + 51, + 403, + 557, + 450 + ], + "spans": [ + { + "bbox": [ + 51, + 403, + 557, + 450 + ], + "type": "text", + "content": "Fig. 14: We illustrate the diverse applications of AI in enterprise productivity, content generation, programming, healthcare, finance, customer support, education, and cyber-security. We also highlight critical issues related to truthfulness and privacy, including data leakage, security threats, property rights, fairness, and regulatory compliance, underscoring the need for robust safeguards in AI deployment" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 471, + 302, + 738 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 471, + 302, + 738 + ], + "spans": [ + { + "bbox": [ + 44, + 471, + 302, + 738 + ], + "type": "text", + "content": "stability. 
For example, an LLM-powered clinical assistant may suggest nonexistent diseases or cite unverified treatments, posing risks to patients [739, 950], while financial advisors powered by LLMs might generate persuasive but flawed market forecasts, leading to significant capital misallocation or systemic financial vulnerabilities [951]. Specifically, hallucination is not merely a surface-level output flaw but a systemic artifact rooted in the model's training dynamics and the nature of its data. Specifically, hallucination can stem from three compounding factors: (1) semantic overgeneralization due to exposure to noisy, unverified, or synthetic pretraining corpora; (2) objective misalignment, where maximum-likelihood or reinforcement-based training prioritizes coherence and helpfulness over factual accuracy; and (3) latent distribution shifts between pretraining and deployment-time inputs, particularly under long-tail or adversarial queries [952, 953]. These factors jointly reinforce spurious correlations and amplify unsupported generations, even in otherwise well-aligned models. In sum, hallucination represents a critical bottleneck for the reliable deployment of LLMs. Its mitigation is foundational not only for improving user trust but also for enabling the safe integration of LLMs into high-stakes decision-making" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 309, + 471, + 359, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 471, + 359, + 482 + ], + "spans": [ + { + "bbox": [ + 309, + 471, + 359, + 482 + ], + "type": "text", + "content": "workflows." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 308, + 487, + 565, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 487, + 565, + 685 + ], + "spans": [ + { + "bbox": [ + 308, + 487, + 565, + 685 + ], + "type": "text", + "content": "Privacy. Data privacy concerns [954] represent another significant challenge in LLM deployment [821, 955]. 
Training these models requires vast amounts of text data, which may include personal information, corporate secrets, and medical records [956]. If an LLM inadvertently leaks sensitive training data or lacks robust access control mechanisms, users' private information could be exploited or misused. In corporate settings, LLMs may unintentionally expose confidential documents or sensitive customer data, leading to severe compliance and legal risks. Moreover, inference-time attacks [957], such as membership inference and model extraction, can further expose sensitive data by allowing adversaries to infer training set membership or replicate model behavior. Therefore, LLM-based applications must incorporate data protection measures and privacy-preserving techniques like differential privacy and query rate limiting to mitigate information leakage risks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 308, + 688, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 688, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 308, + 688, + 564, + 748 + ], + "type": "text", + "content": "Robustness. Prompt injection [543] and jailbreak [636] risks pose additional security threats. Attackers can craft adversarial prompts to bypass security restrictions, causing the model to generate harmful or unauthorized content. For example, in chatbot systems, malicious users could manip" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "34" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 123 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 123 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 123 + ], + "type": "text", + "content": "ulate LLMs to generate hate speech, disinformation, or even harmful instructions. Similarly, in AI-powered coding assistants such as GitHub Copilot, attackers may exploit LLMs to produce code with security vulnerabilities, potentially serving as backdoors for future cyberattacks. Developing robust security defenses to prevent LLMs from being misused in real-world applications is crucial for AI safety." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 124, + 301, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 124, + 301, + 297 + ], + "spans": [ + { + "bbox": [ + 47, + 124, + 301, + 297 + ], + "type": "text", + "content": "Copyright. Another pressing concern is intellectual property and copyright protection [958, 959, 960]. LLMs are trained on vast datasets that often include copyrighted texts, source code, and artistic works, raising potential infringement risks. When generating content, LLMs may inadvertently replicate or closely mimic copyrighted material, leading to legal disputes. For instance, AI-powered writing tools might generate articles resembling published works, while coding assistants could produce open-source code snippets without proper licensing [961]. This not only raises concerns about content originality but also introduces legal and ethical dilemmas. 
Addressing these challenges requires watermarking [962, 963], provenance tracking, and clear copyright attribution mechanisms to ensure responsible AI-generated content management [178]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 297, + 301, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 297, + 301, + 457 + ], + "spans": [ + { + "bbox": [ + 47, + 297, + 301, + 457 + ], + "type": "text", + "content": "Ethical and Social Responsibility. Beyond technical concerns, ethical and social responsibility are also critical factors in large-scale LLM deployment. Due to biases in training data, LLMs may generate content that reinforces stereotypes, gender discrimination, or racial biases [964, 965]. In sectors such as hiring, finance, and healthcare, biased AI-generated recommendations could exacerbate existing inequalities and lead to unfair decision-making. Moreover, as LLMs become increasingly integrated into virtual assistants, social media, and news distribution platforms, concerns over AI-generated misinformation, transparency, and accountability are growing. Building fair, transparent, and trustworthy AI governance frameworks is thus essential to mitigating AI-induced social risks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 458, + 301, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 458, + 301, + 619 + ], + "spans": [ + { + "bbox": [ + 44, + 458, + 301, + 619 + ], + "type": "text", + "content": "Governance. As governments worldwide strengthen AI regulations, LLM-related legal and compliance requirements are evolving rapidly. The EU AI Act classifies LLMs as high-risk AI systems, requiring developers to provide transparency reports and risk control mechanisms [966]. China's Generative AI Regulations mandate AI-generated content to align with ethical standards and undergo governmental scrutiny [967]. 
In the United States, regulatory discussions emphasize AI transparency and data privacy protections, urging businesses to establish responsible AI practices [968]. These policy developments indicate that LLM-based applications must comply with regional regulations while maintaining a balance between compliance and innovation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 620, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 620, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 620, + 301, + 748 + ], + "type": "text", + "content": "In summary, while LLM-based applications drive technological progress, they also introduce multifaceted challenges related to misinformation, data privacy, adversarial manipulation, copyright infringement, ethical concerns, and regulatory compliance (refer to Figure 14). These issues not only impact the trustworthiness and legality of AI technologies but also have far-reaching implications for social trust, legal accountability, and business sustainability. Addressing these challenges necessitates a comprehensive approach that integrates privacy protection, content governance, copyright management, ethical safeguards, and regulatory compli" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 308, + 42, + 564, + 66 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 564, + 66 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 564, + 66 + ], + "type": "text", + "content": "ance, alongside collaborative efforts from both academia and industry." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 309, + 83, + 509, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 83, + 509, + 95 + ], + "spans": [ + { + "bbox": [ + 309, + 83, + 509, + 95 + ], + "type": "text", + "content": "8 POTENTIAL RESEARCH DIRECTIONS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 308, + 99, + 565, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 99, + 565, + 133 + ], + "spans": [ + { + "bbox": [ + 308, + 99, + 565, + 133 + ], + "type": "text", + "content": "Through a systematic and comprehensive examination of safety across the entire lifecycle of LLMs, we have identified valuable insights for future research:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 316, + 135, + 564, + 437 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 316, + 135, + 564, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 135, + 564, + 194 + ], + "spans": [ + { + "bbox": [ + 316, + 135, + 564, + 194 + ], + "type": "text", + "content": "* Data generation holds immense potential, particularly in ensuring the safety of generated data and automating the data generation process, which is crucial for reliable and robust model training. Reliable data generation is fundamental to the integrity of model training." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 316, + 194, + 564, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 194, + 564, + 251 + ], + "spans": [ + { + "bbox": [ + 316, + 194, + 564, + 251 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 316, + 194, + 564, + 251 + ], + "type": "text", + "content": " Post-training phases are becoming increasingly critical. Ensuring secure fine-tuning and alignment of data is a key future direction, closely intertwined with data generation. 
As concepts proliferate, multi-objective alignment may emerge as a significant area of focus." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 316, + 251, + 564, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 251, + 564, + 365 + ], + "spans": [ + { + "bbox": [ + 316, + 251, + 564, + 365 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 316, + 251, + 564, + 365 + ], + "type": "text", + "content": " Model editing and unlearning safety are paramount for efficient model updates and deployment. Current learning efficiencies are suboptimal, and advancements in these technologies could revolutionize how models acquire new knowledge, enabling continuous and efficient learning (potentially even localized memory learning). These techniques might surpass traditional SGD algorithms, but safety measures are essential to prevent models from devolving into malicious entities that contradict human intentions." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 316, + 366, + 564, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 366, + 564, + 437 + ], + "spans": [ + { + "bbox": [ + 316, + 366, + 564, + 437 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 316, + 366, + 564, + 437 + ], + "type": "text", + "content": " LLM agents, in the final deployment stage, require robust safety assurances. Ensuring the security of agent tools and agent memory, as well as addressing safety in embodied intelligence scenarios such as web agents and computer agents, are critical areas for further investigation." 
+ } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 309, + 453, + 397, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 453, + 397, + 464 + ], + "spans": [ + { + "bbox": [ + 309, + 453, + 397, + 464 + ], + "type": "text", + "content": "9 CONCLUSION" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 469, + 564, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 469, + 564, + 562 + ], + "spans": [ + { + "bbox": [ + 307, + 469, + 564, + 562 + ], + "type": "text", + "content": "In this survey, we provide a comprehensive analysis of the safety concerns across the entire lifecycle of LLMs, from data preparation and pre-training to post-training, deployment, and commercialization. By introducing the concept of \"fullstack\" safety, we offer an integrated view of the security and safety issues faced by LLMs throughout their development and usage, which addresses gaps in the existing literature that typically focus on specific stages of the lifecycle." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 562, + 565, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 562, + 565, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 562, + 565, + 748 + ], + "type": "text", + "content": "Through an exhaustive review of over " + }, + { + "bbox": [ + 307, + 562, + 565, + 748 + ], + "type": "inline_equation", + "content": "900+" + }, + { + "bbox": [ + 307, + 562, + 565, + 748 + ], + "type": "text", + "content": " papers, we systematically examined and organized the safety issues spanning key stages of LLM production, deployment, and use, including data generation, alignment techniques, model editing, and LLM-based agent systems and LLM-based applications. Our findings highlight the critical vulnerabilities at each stage, such as privacy risks, toxic data, harmful fine-tuning attacks, and deployment challenges. 
The safety of LLMs is a multifaceted issue requiring careful attention to data integrity, model alignment, and post-deployment security measures. Moreover, we propose promising directions for future research, including improvements in data safety, alignment techniques, and defense mechanisms for LLM-based agents. This work is vital for guiding future efforts to make LLMs safer and more reliable, especially as they become increasingly integral to various industries" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "35" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 34 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 78 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 78 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 78 + ], + "type": "text", + "content": "and applications. Ensuring robust security across the entire LLM lifecycle is crucial for their responsible and effective deployment in real-world scenarios." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 102, + 116, + 114 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 102, + 116, + 114 + ], + "spans": [ + { + "bbox": [ + 45, + 102, + 116, + 114 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 122, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 45, + 122, + 301, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 122, + 301, + 192 + ], + "spans": [ + { + "bbox": [ + 45, + 122, + 301, + 192 + ], + "type": "text", + "content": "[1] L. Ouyang, J. Wu, X. Jiang, D. Almeida, C. Wainwright, P. Mishkin, C. Zhang, S. Agarwal, K. Slama, A. Ray et al., \"Training language models to follow instructions with human feedback,\" Advances in neural information processing systems, vol. 35, pp. 27730-27744, 2022." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 192, + 301, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 192, + 301, + 250 + ], + "spans": [ + { + "bbox": [ + 45, + 192, + 301, + 250 + ], + "type": "text", + "content": "[2] H. Touvron, T. Lavril, G. Izacard, X. Martinet, M.-A. Lachaux, T. Lacroix, B. Rozière, N. Goyal, E. Hambro, F. Azhar et al., \"Llama: Open and efficient foundation language models,\" arXiv preprint arXiv:2302.13971, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 250, + 301, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 250, + 301, + 285 + ], + "spans": [ + { + "bbox": [ + 45, + 250, + 301, + 285 + ], + "type": "text", + "content": "[3] J. Bai, S. Bai, Y. Chu, Z. Cui, K. Dang, X. Deng, Y. Fan, W. Ge, Y. Han, F. Huang et al., \"Qwen technical report,\" arXiv preprint arXiv:2309.16609, 2023." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 285, + 301, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 285, + 301, + 330 + ], + "spans": [ + { + "bbox": [ + 45, + 285, + 301, + 330 + ], + "type": "text", + "content": "[4] A. Liu, B. Feng, B. Xue, B. Wang, B. Wu, C. Lu, C. Zhao, C. Deng, C. Zhang, C. Ruan et al., \"Deepseek-v3 technical report,\" arXiv preprint arXiv:2412.19437, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 331, + 301, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 331, + 301, + 388 + ], + "spans": [ + { + "bbox": [ + 45, + 331, + 301, + 388 + ], + "type": "text", + "content": "[5] D. Guo, D. Yang, H. Zhang, J. Song, R. Zhang, R. Xu, Q. Zhu, S. Ma, P. Wang, X. Bi et al., \"Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning,\" arXiv preprint arXiv:2501.12948, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 388, + 301, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 388, + 301, + 434 + ], + "spans": [ + { + "bbox": [ + 45, + 388, + 301, + 434 + ], + "type": "text", + "content": "[6] W. X. Zhao, K. Zhou, J. Li, T. Tang, X. Wang, Y. Hou, Y. Min, B. Zhang, J. Zhang, Z. Dong et al., \"A survey of large language models,\" arXiv preprint arXiv:2303.18223, vol. 1, no. 2, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 434, + 301, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 434, + 301, + 491 + ], + "spans": [ + { + "bbox": [ + 45, + 434, + 301, + 491 + ], + "type": "text", + "content": "[7] Y. Chang, X. Wang, J. Wang, Y. Wu, L. Yang, K. Zhu, H. Chen, X. Yi, C. Wang, Y. Wang et al., \"A survey on evaluation of large language models,\" ACM transactions on intelligent systems and technology, vol. 15, no. 3, pp. 1-45, 2024." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 492, + 301, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 492, + 301, + 550 + ], + "spans": [ + { + "bbox": [ + 45, + 492, + 301, + 550 + ], + "type": "text", + "content": "[8] M. U. Hadi, R. Qureshi, A. Shah, M. Irfan, A. Zafar, M. B. Shaikh, N. Akhtar, J. Wu, S. Mirjalili et al., \"A survey on large language models: Applications, challenges, limitations, and practical usage,\" Authorea Preprints, vol. 3, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 550, + 301, + 607 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 550, + 301, + 607 + ], + "spans": [ + { + "bbox": [ + 45, + 550, + 301, + 607 + ], + "type": "text", + "content": "[9] Y. Yan, S. Wang, J. Huo, J. Ye, Z. Chu, X. Hu, P. S. Yu, C. Gomes, B. Selman, and Q. Wen, \"Position: Multimodal large language models can significantly advance scientific reasoning,\" arXiv preprint arXiv:2502.02871, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 45, + 608, + 301, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 608, + 301, + 666 + ], + "spans": [ + { + "bbox": [ + 45, + 608, + 301, + 666 + ], + "type": "text", + "content": "[10] Y. Yan, J. Su, J. He, F. Fu, X. Zheng, Y. Lyu, K. Wang, S. Wang, Q. Wen, and X. Hu, “A survey of mathematical reasoning in the era of multimodal large language model: Benchmark, method & challenges,” arXiv preprint arXiv:2412.11936, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 45, + 666, + 301, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 666, + 301, + 723 + ], + "spans": [ + { + "bbox": [ + 45, + 666, + 301, + 723 + ], + "type": "text", + "content": "[11] X. Zou, Y. Yan, X. Hao, Y. Hu, H. Wen, E. Liu, J. Zhang, Y. Li, T. Li, Y. 
Zheng et al., \"Deep learning for cross-domain data fusion in urban computing: Taxonomy, advances, and outlook,\" Information Fusion, vol. 113, p. 102606, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 45, + 723, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 723, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 45, + 723, + 301, + 747 + ], + "type": "text", + "content": "[12] Y. Li, X. Zhang, L. Luo, H. Chang, Y. Ren, I. King, and J. Li, “G-refer: Graph retrieval-augmented large" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 747 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 333, + 42, + 564, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 42, + 564, + 65 + ], + "spans": [ + { + "bbox": [ + 333, + 42, + 564, + 65 + ], + "type": "text", + "content": "language model for explainable recommendation,\" arXiv preprint arXiv:2502.12586, 2025." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 310, + 65, + 564, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 65, + 564, + 112 + ], + "spans": [ + { + "bbox": [ + 310, + 65, + 564, + 112 + ], + "type": "text", + "content": "[13] S. Sun, R. Liu, J. Lyu, J.-W. Yang, L. Zhang, and X. Li, \"A large language model-driven reward design framework via dynamic feedback for reinforcement learning,\" arXiv preprint arXiv:2410.14660, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 112, + 564, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 112, + 564, + 180 + ], + "spans": [ + { + "bbox": [ + 310, + 112, + 564, + 180 + ], + "type": "text", + "content": "[14] S. Sonko, A. O. Adewusi, O. C. Obi, S. Onwusinkwue, and A. 
Atadoga, “A critical review towards artificial general intelligence: Challenges, ethical considerations, and the path forward,” World Journal of Advanced Research and Reviews, vol. 21, no. 3, pp. 1262-1268, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 180, + 564, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 180, + 564, + 239 + ], + "spans": [ + { + "bbox": [ + 310, + 180, + 564, + 239 + ], + "type": "text", + "content": "[15] S. McLean, G. J. Read, J. Thompson, C. Baber, N. A. Stanton, and P. M. Salmon, \"The risks associated with artificial general intelligence: A systematic review,\" Journal of Experimental & Theoretical Artificial Intelligence, vol. 35, no. 5, pp. 649-663, 2023." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 239, + 564, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 239, + 564, + 285 + ], + "spans": [ + { + "bbox": [ + 310, + 239, + 564, + 285 + ], + "type": "text", + "content": "[16] R. Liu, J. Gao, J. Zhao, K. Zhang, X. Li, B. Qi, W. Ouyang, and B. Zhou, \"Can 1b llm surpass 405b llm? rethinking compute-optimal test-time scaling,\" arXiv preprint arXiv:2502.06703, 2025." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 285, + 564, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 285, + 564, + 342 + ], + "spans": [ + { + "bbox": [ + 310, + 285, + 564, + 342 + ], + "type": "text", + "content": "[17] J. Ruan, Y. Chen, B. Zhang, Z. Xu, T. Bao, H. Mao, Z. Li, X. Zeng, R. Zhao et al., \"Tptu: Task planning and tool usage of large language model-based ai agents,\" in NeurIPS 2023 Foundation Models for Decision Making Workshop, 2023." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 342, + 564, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 342, + 564, + 399 + ], + "spans": [ + { + "bbox": [ + 310, + 342, + 564, + 399 + ], + "type": "text", + "content": "[18] V. Sorin, E. Klang, M. Sklair-Levy, I. Cohen, D. B. Zippel, N. Balint Lahat, E. Konen, and Y. Barash, \"Large language model (chatgpt) as a support tool for breast tumor board,\" NPJ Breast Cancer, vol. 9, no. 1, p. 44, 2023." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 399, + 564, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 399, + 564, + 458 + ], + "spans": [ + { + "bbox": [ + 310, + 399, + 564, + 458 + ], + "type": "text", + "content": "[19] R. Yang, L. Song, Y. Li, S. Zhao, Y. Ge, X. Li, and Y. Shan, \"Gpt4tools: Teaching large language model to use tools via self-instruction,\" Advances in Neural Information Processing Systems, vol. 36, pp. 71-995-72007, 2023." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 458, + 564, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 458, + 564, + 525 + ], + "spans": [ + { + "bbox": [ + 310, + 458, + 564, + 525 + ], + "type": "text", + "content": "[20] T. Schick, J. Dwivedi-Yu, R. Dessi, R. Raileanu, M. Lomeli, E. Hambro, L. Zettlemoyer, N. Cancedda, and T. Scialom, \"Toolformer: Language models can teach themselves to use tools,\" Advances in Neural Information Processing Systems, vol. 36, pp. 68-59-68-551, 2023." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 526, + 564, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 526, + 564, + 584 + ], + "spans": [ + { + "bbox": [ + 310, + 526, + 564, + 584 + ], + "type": "text", + "content": "[21] W. Zhong, L. Guo, Q. Gao, H. Ye, and Y. 
Wang, \"Memorybank: Enhancing large language models with long-term memory,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 17, 2024, pp. 19724-19731." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 584, + 564, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 584, + 564, + 632 + ], + "spans": [ + { + "bbox": [ + 310, + 584, + 564, + 632 + ], + "type": "text", + "content": "[22] W. Wang, L. Dong, H. Cheng, X. Liu, X. Yan, J. Gao, and F. Wei, \"Augmenting language models with long-term memory,\" Advances in Neural Information Processing Systems, vol. 36, pp. 74530-74543, 2023." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 632, + 564, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 632, + 564, + 677 + ], + "spans": [ + { + "bbox": [ + 310, + 632, + 564, + 677 + ], + "type": "text", + "content": "[23] Z. Zhang, X. Bo, C. Ma, R. Li, X. Chen, Q. Dai, J. Zhu, Z. Dong, and J.-R. Wen, \"A survey on the memory mechanism of large language model based agents,\" arXiv preprint arXiv:2404.13501, 2024." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 677, + 564, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 677, + 564, + 723 + ], + "spans": [ + { + "bbox": [ + 310, + 677, + 564, + 723 + ], + "type": "text", + "content": "[24] J. Huo, Y. Yan, B. Hu, Y. Yue, and X. Hu, \"Mmneuron: Discovering neuron-level domain-specific interpretation in multimodal large language model,\" arXiv preprint arXiv:2406.11193, 2024." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 723, + 564, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 723, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 310, + 723, + 564, + 747 + ], + "type": "text", + "content": "[25] W. Liu, X. Huang, X. Zeng, X. Hao, S. Yu, D. Li, S. Wang, W. Gan, Z. Liu, Y. 
Yu et al., \"Toolace: Win" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "36" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 35 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 42, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 71, + 42, + 301, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 301, + 65 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 301, + 65 + ], + "type": "text", + "content": "ning the points of llm function calling,\" arXiv preprint arXiv:2409.00920, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 66, + 301, + 111 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 66, + 301, + 111 + ], + "spans": [ + { + "bbox": [ + 46, + 66, + 301, + 111 + ], + "type": "text", + "content": "[26] Q. Tang, Z. Deng, H. Lin, X. Han, Q. Liang, B. Cao, and L. Sun, \"Toolalpaca: Generalized tool learning for language models with 3000 simulated cases,\" arXiv preprint arXiv:2306.05301, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 112, + 301, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 112, + 301, + 158 + ], + "spans": [ + { + "bbox": [ + 47, + 112, + 301, + 158 + ], + "type": "text", + "content": "[27] T. Guo, X. Chen, Y. Wang, R. 
Chang, S. Pei, N. V. Chawla, O. Wiest, and X. Zhang, \"Large language model based multi-agents: A survey of progress and challenges,\" arXiv preprint arXiv:2402.01680, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 158, + 301, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 158, + 301, + 215 + ], + "spans": [ + { + "bbox": [ + 47, + 158, + 301, + 215 + ], + "type": "text", + "content": "[28] L. Wang, C. Ma, X. Feng, Z. Zhang, H. Yang, J. Zhang, Z. Chen, J. Tang, X. Chen, Y. Lin et al., \"A survey on large language model based autonomous agents,\" Frontiers of Computer Science, vol. 18, no. 6, p. 186345, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 215, + 301, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 215, + 301, + 274 + ], + "spans": [ + { + "bbox": [ + 47, + 215, + 301, + 274 + ], + "type": "text", + "content": "[29] Z. Xi, W. Chen, X. Guo, W. He, Y. Ding, B. Hong, M. Zhang, J. Wang, S. Jin, E. Zhou et al., \"The rise and potential of large language model based agents: A survey,\" Science China Information Sciences, vol. 68, no. 2, p. 121101, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 274, + 301, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 274, + 301, + 331 + ], + "spans": [ + { + "bbox": [ + 47, + 274, + 301, + 331 + ], + "type": "text", + "content": "[30] Y. Yan and J. Lee, \"Georeasoner: Reasoning on geospatially grounded context for natural language understanding,\" in Proceedings of the 33rd ACM International Conference on Information and Knowledge Management, 2024, pp. 4163-4167." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 331, + 301, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 331, + 301, + 399 + ], + "spans": [ + { + "bbox": [ + 47, + 331, + 301, + 399 + ], + "type": "text", + "content": "[31] A. Majumdar, K. Yadav, S. 
Arnaud, J. Ma, C. Chen, S. Silwal, A. Jain, V-P. Berges, T. Wu, J. Vakil et al., \"Where are we in the search for an artificial visual cortex for embodied intelligence?\" Advances in Neural Information Processing Systems, vol. 36, pp. 655-677, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 399, + 301, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 399, + 301, + 458 + ], + "spans": [ + { + "bbox": [ + 47, + 399, + 301, + 458 + ], + "type": "text", + "content": "[32] M. Zhou, H. Dong, H. Song, N. Zheng, W.-H. Chen, and H. Wang, \"Embodied intelligence-based perception, decision-making, and control for autonomous operations of rail transportation,\" IEEE Transactions on Intelligent Vehicles, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 458, + 301, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 458, + 301, + 504 + ], + "spans": [ + { + "bbox": [ + 47, + 458, + 301, + 504 + ], + "type": "text", + "content": "[33] X. Ma, Y. Gao, Y. Wang, R. Wang, X. Wang, Y. Sun, Y. Ding, H. Xu, Y. Chen, Y. Zhao et al., \"Safety at scale: A comprehensive survey of large model safety,\" arXiv preprint arXiv:2502.05206, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 504, + 301, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 504, + 301, + 561 + ], + "spans": [ + { + "bbox": [ + 47, + 504, + 301, + 561 + ], + "type": "text", + "content": "[34] K. Kumar, T. Ashraf, O. Thawakar, R. M. Anwer, H. Cholakkal, M. Shah, M.-H. Yang, P. H. Torr, S. Khan, and F. S. Khan, \"Llm post-training: A deep dive into reasoning large language models,\" arXiv preprint arXiv:2502.21321, 2025." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 561, + 301, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 561, + 301, + 619 + ], + "spans": [ + { + "bbox": [ + 47, + 561, + 301, + 619 + ], + "type": "text", + "content": "[35] Z.-Z. Li, D. Zhang, M.-L. Zhang, J. Zhang, Z. Liu, Y. Yao, H. Xu, J. Zheng, P.-J. Wang, X. Chen et al., \"From system 1 to system 2: A survey of reasoning large language models,\" arXiv preprint arXiv:2502.17419, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 619, + 301, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 619, + 301, + 677 + ], + "spans": [ + { + "bbox": [ + 47, + 619, + 301, + 677 + ], + "type": "text", + "content": "[36] Y. Chen, W. Sun, C. Fang, Z. Chen, Y. Ge, T. Han, Q. Zhang, Y. Liu, Z. Chen, and B. Xu, \"Security of language models for code: A systematic literature review,\" ACM Transactions on Software Engineering and Methodology, vol. 1, no. 1, pp. 1-66, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 677, + 301, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 301, + 723 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 301, + 723 + ], + "type": "text", + "content": "[37] W. Qu, Y. Zhou, Y. Wu, T. Xiao, B. Yuan, Y. Li, and J. Zhang, \"Prompt inversion attack against collaborative inference of large language models,\" in IEEE S&P, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 723, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 723, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 47, + 723, + 301, + 747 + ], + "type": "text", + "content": "[38] J. Wu, S. Yang, R. Zhan, Y. Yuan, L. S. Chao, and D. F. 
Wong, \"A survey on llm-generated text detection: Ne" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 747 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "type": "text", + "content": "cessity, methods, and future directions,\" Computational Linguistics, pp. 1-66, 2025." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 310, + 65, + 564, + 100 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 65, + 564, + 100 + ], + "spans": [ + { + "bbox": [ + 310, + 65, + 564, + 100 + ], + "type": "text", + "content": "[39] H. Wang, J. Li, H. Wu, E. Hovy, and Y. Sun, \"Pre-trained language models and their applications,\" *Engineering*, vol. 25, pp. 51-65, 2023." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 100, + 564, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 100, + 564, + 157 + ], + "spans": [ + { + "bbox": [ + 310, + 100, + 564, + 157 + ], + "type": "text", + "content": "[40] C. Zhou, Q. Li, C. Li, J. Yu, Y. Liu, G. Wang, K. Zhang, C. Ji, Q. Yan, L. He et al., \"A comprehensive survey on pretrained foundation models: A history from bert to chatgpt,\" International Journal of Machine Learning and Cybernetics, pp. 1-65, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 157, + 564, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 157, + 564, + 192 + ], + "spans": [ + { + "bbox": [ + 310, + 157, + 564, + 192 + ], + "type": "text", + "content": "[41] X. Zhang, X. Zhu, and L. Lessard, \"Online data poisoning attacks,\" in Learning for Dynamics and Control. PMLR, 2020, pp. 201-210." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 311, + 192, + 564, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 192, + 564, + 262 + ], + "spans": [ + { + "bbox": [ + 311, + 192, + 564, + 262 + ], + "type": "text", + "content": "[42] M. Goldblum, D. Tsipras, C. Xie, X. Chen, A. Schwarzschild, D. Song, A. Madry, B. Li, and T. Goldstein, \"Dataset security for machine learning: Data poisoning, backdoor attacks, and defenses,\" IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 45, no. 2, pp. 1563-1580, 2022." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 311, + 262, + 564, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 262, + 564, + 319 + ], + "spans": [ + { + "bbox": [ + 311, + 262, + 564, + 319 + ], + "type": "text", + "content": "[43] N. Lukas, A. Salem, R. Sim, S. Tople, L. Wutschitz, and S. Zanella-Béguelin, \"Analyzing leakage of personally identifiable information in language models,\" in 2023 IEEE Symposium on Security and Privacy (SP). IEEE, 2023, pp. 346-363." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 311, + 319, + 564, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 319, + 564, + 399 + ], + "spans": [ + { + "bbox": [ + 311, + 319, + 564, + 399 + ], + "type": "text", + "content": "[44] W. Sun, Y. Chen, C. Fang, Y. Feng, Y. Xiao, A. Guo, Q. Zhang, Y. Liu, B. Xu, and Z. Chen, \"Eliminating backdoors in neural code models for secure code understanding,\" in Proceedings of the 33rd ACM International Conference on the Foundations of Software Engineering. Trondheim, Norway: ACM, Mon 23 - Fri 27 June 2025, pp. 1-23." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 311, + 399, + 564, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 399, + 564, + 457 + ], + "spans": [ + { + "bbox": [ + 311, + 399, + 564, + 457 + ], + "type": "text", + "content": "[45] H. R. Kirk, B. 
Vidgen, P. Röttger, and S. A. Hale, \"The benefits, risks and bounds of personalizing the alignment of large language models to individuals,\" Nature Machine Intelligence, vol. 6, no. 4, pp. 383-392, 2024." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 311, + 457, + 564, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 457, + 564, + 515 + ], + "spans": [ + { + "bbox": [ + 311, + 457, + 564, + 515 + ], + "type": "text", + "content": "[46] Z. Zhou, H. Yu, X. Zhang, R. Xu, F. Huang, and Y. Li, \"How alignment and jailbreak work: Explain llm safety through intermediate hidden states,\" in Findings of the Association for Computational Linguistics: EMNLP 2024, 2024, pp. 2461-2488." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 311, + 515, + 564, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 515, + 564, + 573 + ], + "spans": [ + { + "bbox": [ + 311, + 515, + 564, + 573 + ], + "type": "text", + "content": "[47] X. Qi, Y. Zeng, T. Xie, P.-Y. Chen, R. Jia, P. Mittal, and P. Henderson, \"Fine-tuning aligned language models compromises safety, even when users do not intend to!\" in ICLR, 2024. [Online]. Available: https://openreview.net/forum?id=hTEGyKf0dZ" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 311, + 573, + 564, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 573, + 564, + 642 + ], + "spans": [ + { + "bbox": [ + 311, + 573, + 564, + 642 + ], + "type": "text", + "content": "[48] X. Qi, A. Panda, K. Lyu, X. Ma, S. Roy, A. Beirami, P. Mittal, and P. Henderson, \"Safety alignment should be made more than just a few tokens deep,\" in The Thirteen International Conference on Learning Representations, 2025. [Online]. 
Available: https://openreview.net/forum?id=6Mxhg9PtDE" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 311, + 642, + 564, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 642, + 564, + 700 + ], + "spans": [ + { + "bbox": [ + 311, + 642, + 564, + 700 + ], + "type": "text", + "content": "[49] D. Halawi, A. Wei, E. Wallace, T. T. Wang, N. Hagh-talab, and J. Steinhardt, \"Covert malicious finetuning: Challenges in safeguarding LLM adaptation,\" in Proceedings of the 41st International Conference on Machine Learning. PMLR, 2024, pp. 17298-17312." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 311, + 700, + 564, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 700, + 564, + 734 + ], + "spans": [ + { + "bbox": [ + 311, + 700, + 564, + 734 + ], + "type": "text", + "content": "[50] W. Hawkins, B. Mittelstadt, and C. Russell, \"The effect of fine-tuning on language model toxicity,\" in Neurips Safe Generative AI Workshop 2024, 2024." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 311, + 734, + 564, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 734, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 311, + 734, + 564, + 747 + ], + "type": "text", + "content": "[51] J. Huang and J. Zhang, \"A survey on evaluation of" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "37" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 36 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 42, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "type": "text", + "content": "multimodal large language models,\" arXiv preprint arXiv:2408.15769, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 65, + 301, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 65, + 301, + 112 + ], + "spans": [ + { + "bbox": [ + 46, + 65, + 301, + 112 + ], + "type": "text", + "content": "[52] P. Röttger, F. Pernisi, B. Vidgen, and D. Hovy, \"Safetyprompts: a systematic review of open datasets for evaluating and improving large language model safety,\" arXiv preprint arXiv:2404.05399, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 112, + 301, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 112, + 301, + 157 + ], + "spans": [ + { + "bbox": [ + 46, + 112, + 301, + 157 + ], + "type": "text", + "content": "[53] Y. Dong, R. Mu, Y. Zhang, S. Sun, T. Zhang, C. Wu, G. Jin, Y. Qi, J. Hu, J. Meng et al., \"Safeguarding large language models: A survey,\" arXiv preprint arXiv:2406.02622, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 158, + 301, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 158, + 301, + 204 + ], + "spans": [ + { + "bbox": [ + 46, + 158, + 301, + 204 + ], + "type": "text", + "content": "[54] Y. Wang, Y. Pan, Q. Zhao, Y. Deng, Z. Su, L. Du, and T. H. Luan, \"Large model agents: State-of-the-art, cooperation paradigms, security and privacy, and future trends,\" arXiv preprint arXiv:2409.14457, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 205, + 301, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 205, + 301, + 250 + ], + "spans": [ + { + "bbox": [ + 46, + 205, + 301, + 250 + ], + "type": "text", + "content": "[55] G. Zhang, K. Chen, G. Wan, H. Chang, H. Cheng, K. Wang, S. Hu, and L. Bai, \"Evoflow: Evolving diverse agentic workflows on the fly,\" arXiv preprint arXiv:2502.07373, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 251, + 301, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 251, + 301, + 285 + ], + "spans": [ + { + "bbox": [ + 46, + 251, + 301, + 285 + ], + "type": "text", + "content": "[56] G. Zhang, L. Niu, J. Fang, K. Wang, L. Bai, and X. Wang, \"Multi-agent architecture search via agentic supernet,\" arXiv preprint arXiv:2502.04180, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 285, + 301, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 285, + 301, + 342 + ], + "spans": [ + { + "bbox": [ + 46, + 285, + 301, + 342 + ], + "type": "text", + "content": "[57] G. Zhang, Y. Yue, Z. Li, S. Yun, G. Wan, K. Wang, D. Cheng, J. X. Yu, and T. Chen, \"Cut the crap: An economical communication pipeline for llm-based multi-agent systems,\" arXiv preprint arXiv:2410.02506, 2024." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 342, + 301, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 342, + 301, + 388 + ], + "spans": [ + { + "bbox": [ + 46, + 342, + 301, + 388 + ], + "type": "text", + "content": "[58] Y. Yue, G. Zhang, B. Liu, G. Wan, K. Wang, D. Cheng, and Y. Qi, \"Masrouter: Learning to route llms for multi-agent systems,\" 2025. [Online]. Available: https://arxiv.org/abs/2502.11133" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 388, + 301, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 388, + 301, + 446 + ], + "spans": [ + { + "bbox": [ + 46, + 388, + 301, + 446 + ], + "type": "text", + "content": "[59] Z. Liang, Y. Xu, Y. Hong, P. Shang, Q. Wang, Q. Fu, and K. Liu, \"A survey of multimodel large language models,\" in Proceedings of the 3rd International Conference on Computer, Artificial Intelligence and Control Engineering, 2024, pp. 405-409." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 447, + 301, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 447, + 301, + 492 + ], + "spans": [ + { + "bbox": [ + 46, + 447, + 301, + 492 + ], + "type": "text", + "content": "[60] S. Zhang, L. Dong, X. Li, S. Zhang, X. Sun, S. Wang, J. Li, R. Hu, T. Zhang, F. Wu et al., \"Instruction tuning for large language models: A survey,\" arXiv preprint arXiv:2308.10792, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 492, + 301, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 492, + 301, + 550 + ], + "spans": [ + { + "bbox": [ + 46, + 492, + 301, + 550 + ], + "type": "text", + "content": "[61] H. Zhao, H. Chen, F. Yang, N. Liu, H. Deng, H. Cai, S. Wang, D. Yin, and M. Du, \"Explainability for large language models: A survey,\" ACM Transactions on Intelligent Systems and Technology, vol. 15, no. 2, pp. 1-38, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 550, + 301, + 595 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 550, + 301, + 595 + ], + "spans": [ + { + "bbox": [ + 46, + 550, + 301, + 595 + ], + "type": "text", + "content": "[62] T. Shen, R. Jin, Y. Huang, C. Liu, W. Dong, Z. Guo, X. Wu, Y. Liu, and D. Xiong, \"Large language model alignment: A survey,\" arXiv preprint arXiv:2309.15025, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 596, + 301, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 596, + 301, + 665 + ], + "spans": [ + { + "bbox": [ + 46, + 596, + 301, + 665 + ], + "type": "text", + "content": "[63] M. A. K. Raiaan, M. S. H. Mukta, K. Fatema, N. M. Fahad, S. Sakib, M. M. J. Mim, J. Ahmad, M. E. Ali, and S. Azam, \"A review on large language models: Architectures, applications, taxonomies, open issues and challenges,\" IEEE access, vol. 12, pp. 26839-26874, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 666, + 301, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 666, + 301, + 700 + ], + "spans": [ + { + "bbox": [ + 46, + 666, + 301, + 700 + ], + "type": "text", + "content": "[64] K. S. Kalyan, \"A survey of gpt-3 family large language models including chatgpt and gpt-4,\" Natural Language Processing Journal, vol. 6, p. 100048, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 46, + 700, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 700, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 46, + 700, + 301, + 746 + ], + "type": "text", + "content": "[65] E. Shayegani, M. A. A. Mamun, Y. Fu, P. Zaree, Y. Dong, and N. Abu-Ghazaleh, \"Survey of vulnerabilities in large language models revealed by adversarial attacks,\" arXiv preprint arXiv:2310.10844, 2023." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 310, + 42, + 564, + 89 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 42, + 564, + 89 + ], + "spans": [ + { + "bbox": [ + 310, + 42, + 564, + 89 + ], + "type": "text", + "content": "[66] Y. Yao, J. Duan, K. Xu, Y. Cai, Z. Sun, and Y. Zhang, \"A survey on large language model (llm) security and privacy: The good, the bad, and the ugly,\" High-Confidence Computing, p. 100211, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 89, + 564, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 89, + 564, + 134 + ], + "spans": [ + { + "bbox": [ + 310, + 89, + 564, + 134 + ], + "type": "text", + "content": "[67] L. Qin, Q. Chen, Y. Zhou, Z. Chen, Y. Li, L. Liao, M. Li, W. Che, and P. S. Yu, \"Multilingual large language model: A survey of resources, taxonomy and frontiers,\" arXiv preprint arXiv:2404.04925, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 134, + 564, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 134, + 564, + 192 + ], + "spans": [ + { + "bbox": [ + 310, + 134, + 564, + 192 + ], + "type": "text", + "content": "[68] M. U. Hadi, R. Qureshi, A. Shah, M. Irfan, A. Zafar, M. B. Shaikh, N. Akhtar, J. Wu, S. Mirjalili et al., \"Large language models: a comprehensive survey of its applications, challenges, limitations, and future prospects,\" Authorea Preprints, vol. 1, pp. 1-26, 2023." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 192, + 564, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 192, + 564, + 239 + ], + "spans": [ + { + "bbox": [ + 310, + 192, + 564, + 239 + ], + "type": "text", + "content": "[69] L. Sun, Y. Huang, H. Wang, S. Wu, Q. Zhang, C. Gao, Y. Huang, W. Lyu, Y. 
Zhang, X. Li et al., \"Trustllm: Trustworthiness in large language models,\" arXiv preprint arXiv:2401.05561, vol. 3, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 239, + 564, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 239, + 564, + 274 + ], + "spans": [ + { + "bbox": [ + 310, + 239, + 564, + 274 + ], + "type": "text", + "content": "[70] B. C. Das, M. H. Amini, and Y. Wu, \"Security and privacy challenges of large language models: A survey,\" ACM Computing Surveys, vol. 57, no. 6, pp. 1-39, 2025." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 274, + 564, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 274, + 564, + 319 + ], + "spans": [ + { + "bbox": [ + 310, + 274, + 564, + 319 + ], + "type": "text", + "content": "[71] F. He, T. Zhu, D. Ye, B. Liu, W. Zhou, and P. S. Yu, \"The emerged security and privacy of llm agent: A survey with case studies,\" arXiv preprint arXiv:2407.19354, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 319, + 564, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 319, + 564, + 365 + ], + "spans": [ + { + "bbox": [ + 310, + 319, + 564, + 365 + ], + "type": "text", + "content": "[72] G. Tie, Z. Zhao, D. Song, F. Wei, R. Zhou, Y. Dai, W. Yin, Z. Yang, J. Yan, Y. Su et al., \"A survey on post-training of large language models,\" arXiv preprint arXiv:2503.06072, 2025." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 365, + 564, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 365, + 564, + 423 + ], + "spans": [ + { + "bbox": [ + 310, + 365, + 564, + 423 + ], + "type": "text", + "content": "[73] Y. Huang, C. Gao, S. Wu, H. Wang, X. Wang, Y. Zhou, Y. Wang, J. Ye, J. Shi, Q. Zhang et al., \"On the trustworthiness of generative foundation models: Guideline, assessment, and perspective,\" arXiv preprint arXiv:2502.14296, 2025." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 423, + 564, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 423, + 564, + 469 + ], + "spans": [ + { + "bbox": [ + 310, + 423, + 564, + 469 + ], + "type": "text", + "content": "[74] M. Yu, F. Meng, X. Zhou, S. Wang, J. Mao, L. Pang, T. Chen, K. Wang, X. Li, Y. Zhang et al., \"A survey on trustworthy llm agents: Threats and countermeasures,\" arXiv preprint arXiv:2503.09648, 2025." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 469, + 564, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 469, + 564, + 585 + ], + "spans": [ + { + "bbox": [ + 310, + 469, + 564, + 585 + ], + "type": "text", + "content": "[75] X. Ma, Y. Gao, Y. Wang, R. Wang, X. Wang, Y. Sun, Y. Ding, H. Xu, Y. Chen, Y. Zhao, H. Huang, Y. Li, J. Zhang, X. Zheng, Y. Bai, Z. Wu, X. Qiu, J. Zhang, Y. Li, J. Sun, C. Wang, J. Gu, B. Wu, S. Chen, T. Zhang, Y. Liu, M. Gong, T. Liu, S. Pan, C. Xie, T. Pang, Y. Dong, R. Jia, Y. Zhang, S. Ma, X. Zhang, N. Gong, C. Xiao, S. Erfani, B. Li, M. Sugiyama, D. Tao, J. Bailey, and Y.-G. Jiang, \"Safety at scale: A comprehensive survey of large model safety,\" 2025. [Online]. Available: https://arxiv.org/abs/2502.05206" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 585, + 564, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 585, + 564, + 643 + ], + "spans": [ + { + "bbox": [ + 310, + 585, + 564, + 643 + ], + "type": "text", + "content": "[76] Y. Huang, L. Sun, H. Wang, S. Wu, Q. Zhang, Y. Li, C. Gao, Y. Huang, W. Lyu, Y. Zhang et al., \"Position: Trustllm: Trustworthiness in large language models,\" in International Conference on Machine Learning. PMLR, 2024, pp. 20166-20270." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 643, + 564, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 643, + 564, + 688 + ], + "spans": [ + { + "bbox": [ + 310, + 643, + 564, + 688 + ], + "type": "text", + "content": "[77] Z. Dong, Z. Zhou, C. Yang, J. Shao, and Y. Qiao, \"Attacks, defenses and evaluations for llm conversation safety: A survey,\" arXiv preprint arXiv:2402.09283, 2024." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 689, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 689, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 689, + 564, + 746 + ], + "type": "text", + "content": "[78] G. Penedo, Q. Malartic, D. Hesslow, R. Cojocaru, A. Cappelli, H. Alobeidli, B. Pannier, E. Almazrouei, and J. Launay, \"The refined web dataset for falcon llm: outperforming curated corpora with web data, and web data only,\" arXiv preprint arXiv:2306.01116, 2023." + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "38" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 37 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 100 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 42, + 301, + 100 + ], + "spans": [ + { + "bbox": [ + 47, + 42, + 301, + 100 + ], + "type": "text", + "content": "[79] L. Soldaini, R. Kinney, A. Bhagia, D. Schwenk, D. Atkinson, R. Authur, B. Bogin, K. Chandu, J. Dumas, Y. Elazar et al., \"Dolma: An open corpus of three trillion tokens for language model pretraining research,\" arXiv preprint arXiv:2402.00159, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 100, + 301, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 100, + 301, + 145 + ], + "spans": [ + { + "bbox": [ + 47, + 100, + 301, + 145 + ], + "type": "text", + "content": "[80] J. Kaddour, J. Harris, M. Mozes, H. Bradley, R. Raileanu, and R. McHardy, \"Challenges and applications of large language models,\" arXiv preprint arXiv:2307.10169, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 146, + 301, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 146, + 301, + 215 + ], + "spans": [ + { + "bbox": [ + 47, + 146, + 301, + 215 + ], + "type": "text", + "content": "[81] W. Sun, Y. Chen, G. Tao, C. Fang, X. Zhang, Q. Zhang, and B. Luo, \"Backdooring neural code search,\" in Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics. 
Toronto, Canada: Association for Computational Linguistics, July 9-14 2023, pp. 9692-9708." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 216, + 301, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 216, + 301, + 297 + ], + "spans": [ + { + "bbox": [ + 47, + 216, + 301, + 297 + ], + "type": "text", + "content": "[82] W. Sun, Y. Chen, M. Yuan, C. Fan, Z. Chen, C. Wang, Y. Liu, B. Xu, and Z. Chen, \"Show me your code! kill code poisoning: A lightweight method based on code naturalness,\" in Proceedings of the IEEE/ACM 47th International Conference on Software Engineering. Ottawa, Ontario, Canada: IEEE Computer Society, Sun 27 April - Sat 3 May 2025, pp. 1-13." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 297, + 301, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 297, + 301, + 354 + ], + "spans": [ + { + "bbox": [ + 47, + 297, + 301, + 354 + ], + "type": "text", + "content": "[83] N. Carlini, M. Jagielski, C. A. Choquette-Choo, D. Paleka, W. Pearce, H. Anderson, A. Terzis, K. Thomas, and F. Tramèr, \"Poisoning web-scale training datasets is practical,\" in 2024 IEEE Symposium on Security and Privacy (SP). IEEE, 2024, pp. 407-425." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 354, + 301, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 354, + 301, + 399 + ], + "spans": [ + { + "bbox": [ + 47, + 354, + 301, + 399 + ], + "type": "text", + "content": "[84] Y. Zhang, J. Rando, I. Evtimov, J. Chi, E. M. Smith, N. Carlini, F. Tramér, and D. Ippolito, \"Persistent pre-training poisoning of llms,\" arXiv preprint arXiv:2410.13722, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 400, + 301, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 400, + 301, + 435 + ], + "spans": [ + { + "bbox": [ + 47, + 400, + 301, + 435 + ], + "type": "text", + "content": "[85] E. Wallace, T. Z. 
Zhao, S. Feng, and S. Singh, \"Concealed data poisoning attacks on nlp models,\" arXiv preprint arXiv:2010.12563, 2020." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 435, + 301, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 435, + 301, + 480 + ], + "spans": [ + { + "bbox": [ + 47, + 435, + 301, + 480 + ], + "type": "text", + "content": "[86] B. Yan, K. Li, M. Xu, Y. Dong, Y. Zhang, Z. Ren, and X. Cheng, \"On protecting the data privacy of large language models (llms): A survey,\" arXiv preprint arXiv:2403.05156, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 481, + 301, + 527 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 481, + 301, + 527 + ], + "spans": [ + { + "bbox": [ + 47, + 481, + 301, + 527 + ], + "type": "text", + "content": "[87] N. Kandpal, E. Wallace, and C. Raffel, \"Deduplicating training data mitigates privacy risks in language models,\" in International Conference on Machine Learning. PMLR, 2022, pp. 10697-10707." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 527, + 301, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 527, + 301, + 573 + ], + "spans": [ + { + "bbox": [ + 47, + 527, + 301, + 573 + ], + "type": "text", + "content": "[88] N. Carlini, D. Ippolito, M. Jagielski, K. Lee, F. Tramer, and C. Zhang, “Quantifying memorization across neural language models,” in The Eleventh International Conference on Learning Representations, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 573, + 301, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 573, + 301, + 619 + ], + "spans": [ + { + "bbox": [ + 47, + 573, + 301, + 619 + ], + "type": "text", + "content": "[89] C. Arnett, E. Jones, I. P. Yamshchikov, and P.-C. Langlais, \"Toxicity of the commons: Curating open-source pre-training data,\" arXiv preprint arXiv:2410.22587, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 619, + 301, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 619, + 301, + 666 + ], + "spans": [ + { + "bbox": [ + 47, + 619, + 301, + 666 + ], + "type": "text", + "content": "[90] K. Lee, D. Ippolito, A. Nystrom, C. Zhang, D. Eck, C. Callison-Burch, and N. Carlini, “Deduplicating training data makes language models better,” arXiv preprint arXiv:2107.06499, 2021." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 666, + 301, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 666, + 301, + 700 + ], + "spans": [ + { + "bbox": [ + 47, + 666, + 301, + 700 + ], + "type": "text", + "content": "[91] Y. Li, Y. Jiang, Z. Li, and S. Xia, \"Backdoor learning: A survey.\" IEEE Transactions on Neural Networks and Learning Systems, vol. 35, no. 1, pp. 5-22, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 700, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 700, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 700, + 301, + 746 + ], + "type": "text", + "content": "[92] Y. Zeng, M. Pan, H. Jahagirdar, M. Jin, L. Lyu, and R. Jia, \"How to sift out a clean data subset in the presence of data poisoning?\" arXiv preprint arXiv:2210.06516, 2022." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 310, + 42, + 564, + 88 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 42, + 564, + 88 + ], + "spans": [ + { + "bbox": [ + 310, + 42, + 564, + 88 + ], + "type": "text", + "content": "[93] M. Pan, Y. Zeng, L. Lyu, X. Lin, and R. Jia, “{ASSET}: Robust backdoor data detection across a multiplicity of deep learning paradigms,” in 32nd USENIX Security Symposium (USENIX Security 23), 2023, pp. 2725–2742." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 310, + 88, + 564, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 88, + 564, + 134 + ], + "spans": [ + { + "bbox": [ + 310, + 88, + 564, + 134 + ], + "type": "text", + "content": "[94] Z. Zhang, L. Lyu, W. Wang, L. Sun, and X. Sun, \"How to inject backdoors with better consistency: Logit anchoring on clean data,\" in International Conference on Learning Representations, 2022." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 134, + 564, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 134, + 564, + 169 + ], + "spans": [ + { + "bbox": [ + 310, + 134, + 564, + 169 + ], + "type": "text", + "content": "[95] Z. Zhang, L. Lyu, X. Ma, C. Wang, and X. Sun, \"Fine-mixing: Mitigating backdoors in fine-tuned language models,\" arXiv preprint arXiv:2210.09545, 2022." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 169, + 564, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 169, + 564, + 227 + ], + "spans": [ + { + "bbox": [ + 310, + 169, + 564, + 227 + ], + "type": "text", + "content": "[96] X. Sun, X. Li, Y. Meng, X. Ao, L. Lyu, J. Li, and T. Zhang, \"Defending against backdoor attacks in natural language generation,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 37, no. 4, 2023, pp. 5257-5265." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 227, + 564, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 227, + 564, + 319 + ], + "spans": [ + { + "bbox": [ + 310, + 227, + 564, + 319 + ], + "type": "text", + "content": "[97] S. Longpre, G. Yauney, E. Reif, K. Lee, A. Roberts, B. Zoph, D. Zhou, J. Wei, K. Robinson, D. 
Mimno et al., \"A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity,\" in Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), 2024, pp. 3245-3276." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 319, + 564, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 319, + 564, + 354 + ], + "spans": [ + { + "bbox": [ + 310, + 319, + 564, + 354 + ], + "type": "text", + "content": "[98] S. Neel and P. Chang, \"Privacy issues in large language models: A survey,\" arXiv preprint arXiv:2312.06717, 2023." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 354, + 564, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 354, + 564, + 389 + ], + "spans": [ + { + "bbox": [ + 310, + 354, + 564, + 389 + ], + "type": "text", + "content": "[99] X. Wu, R. Duan, and J. Ni, \"Unveiling security, privacy, and ethical concerns of chatgpt,\" Journal of Information and Intelligence, vol. 2, no. 2, pp. 102-115, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 388, + 564, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 388, + 564, + 435 + ], + "spans": [ + { + "bbox": [ + 310, + 388, + 564, + 435 + ], + "type": "text", + "content": "[100] M. Gupta, C. Akiri, K. Aryal, E. Parker, and L. Praharaj, \"From chatgpt to threatgpt: Impact of generative ai in cybersecurity and privacy,\" IEEE Access, vol. 11, pp. 80218-80245, 2023." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 435, + 564, + 481 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 435, + 564, + 481 + ], + "spans": [ + { + "bbox": [ + 310, + 435, + 564, + 481 + ], + "type": "text", + "content": "[101] M. Miranda, E. S. Ruzzetti, A. Santilli, F. M. Zanzotto, S. Bratières, and E. 
Rodolà, “Preserving privacy in large language models: A survey on current threats and solutions,” arXiv preprint arXiv:2408.05212, 2024." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 481, + 564, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 481, + 564, + 538 + ], + "spans": [ + { + "bbox": [ + 310, + 481, + 564, + 538 + ], + "type": "text", + "content": "[102] Q. Zhang, H. Qiu, D. Wang, Y. Li, T. Zhang, W. Zhu, H. Weng, L. Yan, and C. Zhang, “A benchmark for semantic sensitive information in llms outputs,” in The Thirteenth International Conference on Learning Representations, 2025." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 538, + 564, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 538, + 564, + 573 + ], + "spans": [ + { + "bbox": [ + 310, + 538, + 564, + 573 + ], + "type": "text", + "content": "[103] S. Kim, S. Yun, H. Lee, M. Gubri, S. Yoon, and S. J. Oh, \"Propile: C,\" Advances in Neural Information Processing Systems, vol. 36, pp. 20750-20762, 2023." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 573, + 564, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 573, + 564, + 608 + ], + "spans": [ + { + "bbox": [ + 310, + 573, + 564, + 608 + ], + "type": "text", + "content": "[104] H. Li, D. Guo, W. Fan, M. Xu, J. Huang, F. Meng, and Y. Song, \"Multi-step jailbreaking privacy attacks on chatgpt,\" arXiv preprint arXiv:2304.05197, 2023." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 608, + 564, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 608, + 564, + 665 + ], + "spans": [ + { + "bbox": [ + 310, + 608, + 564, + 665 + ], + "type": "text", + "content": "[105] M. S. Ozdayi, C. Peris, J. FitzGerald, C. Dupuy, J. Majmudar, H. Khan, R. Parikh, and R. 
Gupta, \"Controlling the extraction of memorized data from large language models via prompt-tuning,\" arXiv preprint arXiv:2305.11759, 2023." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 665, + 564, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 665, + 564, + 723 + ], + "spans": [ + { + "bbox": [ + 310, + 665, + 564, + 723 + ], + "type": "text", + "content": "[106] N. Carlini, C. Liu, U. Erlingsson, J. Kos, and D. Song, \"The secret sharer: Evaluating and testing unintended memorization in neural networks,\" in 28th USENIX security symposium (USENIX security 19), 2019, pp. 267-284." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 723, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 723, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 723, + 564, + 746 + ], + "type": "text", + "content": "[107] M. Nasr, N. Carlini, J. Hayase, M. Jagielski, A. F. Cooper, D. Ippolito, C. A. Choquette-Choo, E. Wallace," + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "39" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 38 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 71, + 42, + 301, + 77 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 301, + 77 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 301, + 77 + ], + "type": "text", + "content": "F. Tramér, and K. Lee, \"Scalable extraction of training data from (production) language models,\" arXiv preprint arXiv:2311.17035, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 77, + 301, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 77, + 301, + 135 + ], + "spans": [ + { + "bbox": [ + 47, + 77, + 301, + 135 + ], + "type": "text", + "content": "[108] N. Carlini, F. Tramer, E. Wallace, M. Jagielski, A. Herbert-Voss, K. Lee, A. Roberts, T. Brown, D. Song, U. Erlingsson et al., \"Extracting training data from large language models,\" in 30th USENIX security symposium (USENIX Security 21), 2021, pp. 2633-2650." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 135, + 301, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 135, + 301, + 180 + ], + "spans": [ + { + "bbox": [ + 47, + 135, + 301, + 180 + ], + "type": "text", + "content": "[109] Y. Bai, G. Pei, J. Gu, Y. Yang, and X. Ma, \"Special characters attack: Toward scalable training data extraction from large language models,\" arXiv preprint arXiv:2405.05990, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 180, + 301, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 180, + 301, + 237 + ], + "spans": [ + { + "bbox": [ + 47, + 180, + 301, + 237 + ], + "type": "text", + "content": "[110] Z. Zhou, J. Xiang, C. Chen, and S. Su, “Quantifying and analyzing entity-level memorization in large language models,” in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 17, 2024, pp. 19741-19749." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 238, + 301, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 238, + 301, + 296 + ], + "spans": [ + { + "bbox": [ + 47, + 238, + 301, + 296 + ], + "type": "text", + "content": "[111] X. Yang, Z. Wen, W. Qu, Z. Chen, Z. Xiang, B. Chen, and H. Yao, “Memorization and privacy risks in domain-specific large language models,” in ICLR 2024 Workshop on Reliable and Responsible Foundation Models, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 296, + 301, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 296, + 301, + 342 + ], + "spans": [ + { + "bbox": [ + 47, + 296, + 301, + 342 + ], + "type": "text", + "content": "[112] R. Shokri, M. Stronati, C. Song, and V. Shmatikov, \"Membership inference attacks against machine learning models,\" in 2017 IEEE symposium on security and privacy (SP). IEEE, 2017, pp. 3-18." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 342, + 301, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 342, + 301, + 389 + ], + "spans": [ + { + "bbox": [ + 47, + 342, + 301, + 389 + ], + "type": "text", + "content": "[113] H. Hu, Z. Salcic, L. Sun, G. Dobbie, P. S. Yu, and X. Zhang, \"Membership inference attacks on machine learning: A survey,\" ACM Computing Surveys (CSUR), vol. 54, no. 11s, pp. 1-37, 2022." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 388, + 301, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 388, + 301, + 435 + ], + "spans": [ + { + "bbox": [ + 47, + 388, + 301, + 435 + ], + "type": "text", + "content": "[114] N. Carlini, S. Chien, M. Nasr, S. Song, A. Terzis, and F. Tramer, \"Membership inference attacks from first principles,\" in 2022 IEEE symposium on security and privacy (SP). IEEE, 2022, pp. 1897-1914." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 435, + 301, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 435, + 301, + 493 + ], + "spans": [ + { + "bbox": [ + 47, + 435, + 301, + 493 + ], + "type": "text", + "content": "[115] J. Ye, A. Maddi, S. K. Murakonda, V. Bindschaedler, and R. Shokri, \"Enhanced membership inference attacks against machine learning models,\" in Proceedings of the 2022 ACM SIGSAC Conference on Computer and Communications Security, 2022, pp. 3093-3106." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 492, + 301, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 492, + 301, + 538 + ], + "spans": [ + { + "bbox": [ + 47, + 492, + 301, + 538 + ], + "type": "text", + "content": "[116] J. Zhang, D. Das, G. Kamath, and F. Tramère, \"Membership inference attacks cannot prove that a model was trained on your data,\" arXiv preprint arXiv:2409.19798, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 539, + 301, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 539, + 301, + 596 + ], + "spans": [ + { + "bbox": [ + 47, + 539, + 301, + 596 + ], + "type": "text", + "content": "[117] M. Duan, A. Suri, N. Mireshghallah, S. Min, W. Shi, L. Zettlemoyer, Y. Tsvetkov, Y. Choi, D. Evans, and H. Hajishirzi, \"Do membership inference attacks work on large language models?\" arXiv preprint arXiv:2402.07841, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 597, + 301, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 597, + 301, + 643 + ], + "spans": [ + { + "bbox": [ + 47, + 597, + 301, + 643 + ], + "type": "text", + "content": "[118] M. Meeus, I. Shilov, S. Jain, M. Faysse, M. Rei, and Y.-A. de Montjoye, \"Sok: Membership inference attacks on llms are rushing nowhere (and how to fix it),\" arXiv preprint arXiv:2406.17975, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 643, + 301, + 711 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 643, + 301, + 711 + ], + "spans": [ + { + "bbox": [ + 47, + 643, + 301, + 711 + ], + "type": "text", + "content": "[119] Y. He, B. Li, Y. Wang, M. Yang, J. Wang, H. Hu, and X. Zhao, \"Is difficulty calibration all we need? towards more practical membership inference attacks,\" in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 1226-1240." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "type": "text", + "content": "[120] Y. He, B. Li, L. Liu, Z. Ba, W. Dong, Y. Li, Z. Qin, K. Ren, and C. Chen, \"Towards label-only membership inference attack against pre-trained large lan" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 747 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 335, + 42, + 514, + 54 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 514, + 54 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 514, + 54 + ], + "type": "text", + "content": "guage models,\" in USENIX Security, 2025." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 310, + 54, + 564, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 54, + 564, + 112 + ], + "spans": [ + { + "bbox": [ + 310, + 54, + 564, + 112 + ], + "type": "text", + "content": "[121] J. Ren, K. Chen, C. Chen, V. Sehwag, Y. Xing, J. Tang, and L. Lyu, \"Self-comparison for dataset-level membership inference in large (vision-) language model,\" in Proceedings of the ACM on Web Conference 2025, 2025, pp. 910-920." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 112, + 564, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 112, + 564, + 158 + ], + "spans": [ + { + "bbox": [ + 310, + 112, + 564, + 158 + ], + "type": "text", + "content": "[122] A. Albalak, Y. Elazar, S. M. Xie, S. Longpre, N. Lambert, X. Wang, N. Muennighoff, B. Hou, L. Pan, H. Jeong et al., \"A survey on data selection for language models,\" arXiv preprint arXiv:2402.16827, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 158, + 564, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 158, + 564, + 204 + ], + "spans": [ + { + "bbox": [ + 310, + 158, + 564, + 204 + ], + "type": "text", + "content": "[123] P. Maini, S. Goyal, D. Sam, A. Robey, Y. Savani, Y. Jiang, A. Zou, Z. C. Lipton, and J. Z. Kolter, \"Safety pretraining: Toward the next generation of safe ai,\" arXiv preprint arXiv:2504.16980, 2025." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 205, + 564, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 205, + 564, + 251 + ], + "spans": [ + { + "bbox": [ + 310, + 205, + 564, + 251 + ], + "type": "text", + "content": "[124] A. Hurst, A. Lerer, A. P. Goucher, A. Perelman, A. Ramesh, A. Clark, A. Ostrow, A. Welihinda, A. Hayes, A. Radford et al., \"Gpt-4o system card,\" arXiv preprint arXiv:2410.21276, 2024." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 251, + 564, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 251, + 564, + 319 + ], + "spans": [ + { + "bbox": [ + 310, + 251, + 564, + 319 + ], + "type": "text", + "content": "[125] S. Li, F. Liu, L. Cui, J. Lu, Q. Xiao, X. Yang, P. Liu, K. Sun, Z. Ma, and X. Wang, \"Safe planner: Empowering safety awareness in large pre-trained models for robot task planning,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 39, no. 14, 2025, pp. 14619-14627." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 319, + 564, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 319, + 564, + 365 + ], + "spans": [ + { + "bbox": [ + 310, + 319, + 564, + 365 + ], + "type": "text", + "content": "[126] J. O'Neill, S. Subramanian, E. Lin, A. Satish, and V. Mugunthan, \"Guardformer: Guardrail instruction pretraining for efficient safeguarding,\" in Neurips Safe Generative AI Workshop 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 365, + 564, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 365, + 564, + 412 + ], + "spans": [ + { + "bbox": [ + 310, + 365, + 564, + 412 + ], + "type": "text", + "content": "[127] T. Huang, S. Hu, F. Ilhan, S. F. Tekin, and L. Liu, \"Harmful fine-tuning attacks and defenses for large language models: A survey,\" arXiv preprint arXiv:2409.18169, 2024." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 412, + 564, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 412, + 564, + 458 + ], + "spans": [ + { + "bbox": [ + 310, + 412, + 564, + 458 + ], + "type": "text", + "content": "[128] M. Shu, J. Wang, C. Zhu, J. Geiping, C. Xiao, and T. Goldstein, \"On the exploitability of instruction tuning,\" Advances in Neural Information Processing Systems, vol. 36, pp. 61-836-61-856, 2023." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 458, + 564, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 458, + 564, + 504 + ], + "spans": [ + { + "bbox": [ + 310, + 458, + 564, + 504 + ], + "type": "text", + "content": "[129] J. Xu, M. D. Ma, F. Wang, C. Xiao, and M. Chen, \"Instructions as backdoors: Backdoor vulnerabilities of instruction tuning for large language models,\" arXiv preprint arXiv:2305.14710, 2023." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 504, + 564, + 560 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 504, + 564, + 560 + ], + "spans": [ + { + "bbox": [ + 310, + 504, + 564, + 560 + ], + "type": "text", + "content": "[130] J. Yan, V. Yadav, S. Li, L. Chen, Z. Tang, H. Wang, V. Srinivasan, X. Ren, and H. Jin, \"Backdooring instruction-tuned large language models with virtual prompt injection,\" arXiv preprint arXiv:2307.16888, 2023." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 561, + 564, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 561, + 564, + 620 + ], + "spans": [ + { + "bbox": [ + 310, + 561, + 564, + 620 + ], + "type": "text", + "content": "[131] H. Yao, J. Lou, and Z. Qin, \"Poisonprompt: Backdoor attack on prompt-based large language models,\" in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 7745-7749." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 620, + 564, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 620, + 564, + 666 + ], + "spans": [ + { + "bbox": [ + 310, + 620, + 564, + 666 + ], + "type": "text", + "content": "[132] S. Zhao, J. Wen, L. A. Tuan, J. Zhao, and J. Fu, \"Prompt as triggers for backdoor attack: Examining the vulnerability in language models,\" arXiv preprint arXiv:2305.01219, 2023." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 666, + 564, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 666, + 564, + 712 + ], + "spans": [ + { + "bbox": [ + 310, + 666, + 564, + 712 + ], + "type": "text", + "content": "[133] Z. Han, C. Gao, J. Liu, J. Zhang, and S. Q. Zhang, \"Parameter-efficient fine-tuning for large models: A comprehensive survey,\" arXiv preprint arXiv:2403.14608, 2024." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 712, + 564, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 712, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 310, + 712, + 564, + 747 + ], + "type": "text", + "content": "[134] L. Xu, H. Xie, S.-Z. J. Qin, X. Tao, and F. L. Wang, \"Parameter-efficient fine-tuning methods for pretrained language models: A critical review and" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "40" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 39 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 71, + 42, + 285, + 53 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 285, + 53 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 285, + 53 + ], + "type": "text", + "content": "assessment,\" arXiv preprint arXiv:2312.12148, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 54, + 301, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 54, + 301, + 112 + ], + "spans": [ + { + "bbox": [ + 47, + 54, + 301, + 112 + ], + "type": "text", + "content": "[135] N. Ding, Y. Qin, G. Yang, F. Wei, Z. Yang, Y. Su, S. Hu, Y. Chen, C.-M. Chan, W. Chen et al., \"Parameter-efficient fine-tuning of large-scale pre-trained language models,\" Nature Machine Intelligence, vol. 5, no. 3, pp. 220-235, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 113, + 301, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 113, + 301, + 158 + ], + "spans": [ + { + "bbox": [ + 47, + 113, + 301, + 158 + ], + "type": "text", + "content": "[136] S. Zhao, L. Gan, L. A. Tuan, J. Fu, L. Lyu, M. Jia, and J. Wen, \"Defending against weight-poisoning backdoor attacks for parameter-efficient fine-tuning,\" arXiv preprint arXiv:2402.12168, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 158, + 301, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 158, + 301, + 204 + ], + "spans": [ + { + "bbox": [ + 47, + 158, + 301, + 204 + ], + "type": "text", + "content": "[137] J. Kim, M. Song, S. H. Na, and S. Shin, \"Obliviate: Neutralizing task-agnostic backdoors within the parameter-efficient fine-tuning paradigm,\" arXiv preprint arXiv:2409.14119, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 205, + 301, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 205, + 301, + 250 + ], + "spans": [ + { + "bbox": [ + 47, + 205, + 301, + 250 + ], + "type": "text", + "content": "[138] S. Jiang, S. R. Kadhe, Y. Zhou, F. Ahmed, L. Cai, and N. Baracaldo, \"Turning generative models degenerate: The power of data poisoning attacks,\" arXiv preprint arXiv:2407.12281, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 251, + 301, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 251, + 301, + 297 + ], + "spans": [ + { + "bbox": [ + 47, + 251, + 301, + 297 + ], + "type": "text", + "content": "[139] T. Li, A. K. Sahu, A. Talwalkar, and V. Smith, \"Federated learning: Challenges, methods, and future directions,\" IEEE signal processing magazine, vol. 37, no. 3, pp. 50-60, 2020." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 297, + 301, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 297, + 301, + 331 + ], + "spans": [ + { + "bbox": [ + 47, + 297, + 301, + 331 + ], + "type": "text", + "content": "[140] C. Zhang, Y. Xie, H. Bai, B. Yu, W. Li, and Y. Gao, \"A survey on federated learning,\" Knowledge-Based Systems, vol. 216, p. 106775, 2021." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 331, + 301, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 331, + 301, + 365 + ], + "spans": [ + { + "bbox": [ + 47, + 331, + 301, + 365 + ], + "type": "text", + "content": "[141] L. Li, Y. Fan, M. Tse, and K.-Y. Lin, \"A review of applications in federated learning,\" Computers & Industrial Engineering, vol. 149, p. 106854, 2020." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 365, + 301, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 365, + 301, + 412 + ], + "spans": [ + { + "bbox": [ + 47, + 365, + 301, + 412 + ], + "type": "text", + "content": "[142] Z. Wang, Z. Shen, Y. He, G. Sun, H. Wang, L. Lyu, and A. Li, \"Flora: Federated fine-tuning large language models with heterogeneous low-rank adaptations,\" arXiv preprint arXiv:2409.05976, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 412, + 301, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 412, + 301, + 446 + ], + "spans": [ + { + "bbox": [ + 47, + 412, + 301, + 446 + ], + "type": "text", + "content": "[143] C. Chen, X. Feng, Y. Li, L. Lyu, J. Zhou, X. Zheng, and J. Yin, \"Integration of large language models and federated learning,\" *Patterns*, vol. 5, no. 12, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 447, + 301, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 447, + 301, + 492 + ], + "spans": [ + { + "bbox": [ + 47, + 447, + 301, + 492 + ], + "type": "text", + "content": "[144] W. Zhuang, C. Chen, and L. Lyu, \"When foundation model meets federated learning: Motivations, challenges, and future directions,\" arXiv preprint arXiv:2306.15546, 2023." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 492, + 301, + 539 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 492, + 301, + 539 + ], + "spans": [ + { + "bbox": [ + 47, + 492, + 301, + 539 + ], + "type": "text", + "content": "[145] G. Sun, Y. Cong, J. Dong, Q. Wang, L. Lyu, and J. Liu, \"Data poisoning attacks on federated machine learning,\" IEEE Internet of Things Journal, vol. 9, no. 13, pp. 11365-11375, 2021." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 539, + 301, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 539, + 301, + 596 + ], + "spans": [ + { + "bbox": [ + 47, + 539, + 301, + 596 + ], + "type": "text", + "content": "[146] L. Lyu, H. Yu, X. Ma, C. Chen, L. Sun, J. Zhao, Q. Yang, and P. S. Yu, \"Privacy and robustness in federated learning: Attacks and defenses,\" IEEE transactions on neural networks and learning systems, vol. 35, no. 7, pp. 8726-8746, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 597, + 301, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 597, + 301, + 643 + ], + "spans": [ + { + "bbox": [ + 47, + 597, + 301, + 643 + ], + "type": "text", + "content": "[147] R. Ye, J. Chai, X. Liu, Y. Yang, Y. Wang, and S. Chen, \"Emerging safety attack and defense in federated instruction tuning of large language models,\" arXiv preprint arXiv:2406.10630, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 643, + 301, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 643, + 301, + 700 + ], + "spans": [ + { + "bbox": [ + 47, + 643, + 301, + 700 + ], + "type": "text", + "content": "[148] Z. Zhang, A. Panda, L. Song, Y. Yang, M. Mahoney, P. Mittal, R. Kannan, and J. Gonzalez, \"Neurotoxin: Durable backdoors in federated learning,\" in International Conference on Machine Learning. PMLR, 2022, pp. 26429-26446." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 700, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 700, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 700, + 301, + 746 + ], + "type": "text", + "content": "[149] T. Fu, M. Sharma, P. Torr, S. B. Cohen, D. Krueger, and F. Berez, “Poisonbench: Assessing large language model vulnerability to data poisoning,” arXiv preprint arXiv:2410.08811, 2024." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 310, + 42, + 564, + 87 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 42, + 564, + 87 + ], + "spans": [ + { + "bbox": [ + 310, + 42, + 564, + 87 + ], + "type": "text", + "content": "[150] P. Pathmanathan, S. Chakraborty, X. Liu, Y. Liang, and F. Huang, \"Is poisoning a real threat to llm alignment? maybe more so than you think,\" arXiv preprint arXiv:2406.12091, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 88, + 564, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 88, + 564, + 134 + ], + "spans": [ + { + "bbox": [ + 310, + 88, + 564, + 134 + ], + "type": "text", + "content": "[151] A. Wan, E. Wallace, S. Shen, and D. Klein, “Poisoning language models during instruction tuning,” in International Conference on Machine Learning. PMLR, 2023, pp. 35413-35425." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 134, + 564, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 134, + 564, + 168 + ], + "spans": [ + { + "bbox": [ + 310, + 134, + 564, + 168 + ], + "type": "text", + "content": "[152] J. Rando and F. Tramer, \"Universal jailbreak backdoors from poisoned human feedback,\" arXiv preprint arXiv:2311.14455, 2023." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 170, + 564, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 170, + 564, + 204 + ], + "spans": [ + { + "bbox": [ + 310, + 170, + 564, + 204 + ], + "type": "text", + "content": "[153] T. Baumgartner, Y. Gao, D. Alon, and D. Metzler, \"Best-of-venom: Attacking rlhf by injecting poisoned preference data,\" arXiv preprint arXiv:2404.05530, 2024." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 205, + 564, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 205, + 564, + 250 + ], + "spans": [ + { + "bbox": [ + 310, + 205, + 564, + 250 + ], + "type": "text", + "content": "[154] B. Chen, H. Guo, G. Wang, Y. Wang, and Q. Yan, \"The dark side of human feedback: Poisoning large language models via user inputs,\" arXiv preprint arXiv:2409.00787, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 251, + 564, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 251, + 564, + 308 + ], + "spans": [ + { + "bbox": [ + 310, + 251, + 564, + 308 + ], + "type": "text", + "content": "[155] Y. Bai, A. Jones, K. Ndousse, A. Askell, A. Chen, N. DasSarma, D. Drain, S. Fort, D. Ganguli, T. Henighan et al., \"Training a helpful and harmless assistant with reinforcement learning from human feedback,\" arXiv preprint arXiv:2204.05862, 2022." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 308, + 564, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 308, + 564, + 354 + ], + "spans": [ + { + "bbox": [ + 310, + 308, + 564, + 354 + ], + "type": "text", + "content": "[156] H. Dong, W. Xiong, B. Pang, H. Wang, H. Zhao, Y. Zhou, N. Jiang, D. Sahoo, C. Xiong, and T. Zhang, \"Rlhf workflow: From reward modeling to online rlhf,\" arXiv preprint arXiv:2405.07863, 2024." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 354, + 564, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 354, + 564, + 412 + ], + "spans": [ + { + "bbox": [ + 310, + 354, + 564, + 412 + ], + "type": "text", + "content": "[157] W. Xiong, H. Dong, C. Ye, Z. Wang, H. Zhong, H. Ji, N. Jiang, and T. Zhang, \"Iterative preference learning from human feedback: Bridging theory and practice for rlhf under kl-constraint,\" arXiv preprint arXiv:2312.11456, 2023." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 412, + 564, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 412, + 564, + 458 + ], + "spans": [ + { + "bbox": [ + 310, + 412, + 564, + 458 + ], + "type": "text", + "content": "[158] H. Lee, S. Phatale, H. Mansoor, K. R. Lu, T. Mesnard, J. Ferret, C. Bishop, E. Hall, V. Carbune, and A. Rastogi, \"Rlaif: Scaling reinforcement learning from human feedback with ai feedback,\" 2023." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 458, + 564, + 516 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 458, + 564, + 516 + ], + "spans": [ + { + "bbox": [ + 310, + 458, + 564, + 516 + ], + "type": "text", + "content": "[159] R. Rafailov, A. Sharma, E. Mitchell, C. D. Manning, S. Ermon, and C. Finn, \"Direct preference optimization: Your language model is secretly a reward model,\" Advances in Neural Information Processing Systems, vol. 36, pp. 53728-53741, 2023." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 516, + 564, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 516, + 564, + 562 + ], + "spans": [ + { + "bbox": [ + 310, + 516, + 564, + 562 + ], + "type": "text", + "content": "[160] J. Wang, J. Wu, M. Chen, Y. Vorobeychik, and C. Xiao, \"Rlhfpoison: Reward poisoning attack for reinforcement learning with human feedback in large language models,\" arXiv preprint arXiv:2311.09641, 2023." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 562, + 564, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 562, + 564, + 608 + ], + "spans": [ + { + "bbox": [ + 310, + 562, + 564, + 608 + ], + "type": "text", + "content": "[161] S. Gunasekar, Y. Zhang, J. Aneja, C. C. T. Mendes, A. Del Giorno, S. Gopi, M. Javaheripi, P. Kauffmann, G. de Rosa, O. Saarikivi et al., \"Textbooks are all you need,\" arXiv preprint arXiv:2306.11644, 2023." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 608, + 564, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 608, + 564, + 653 + ], + "spans": [ + { + "bbox": [ + 310, + 608, + 564, + 653 + ], + "type": "text", + "content": "[162] Y. Li, S. Bubeck, R. Eldan, A. Del Giorno, S. Gunasekar, and Y. T. Lee, \"Textbooks are all you need ii: phi-1.5 technical report,\" arXiv preprint arXiv:2309.05463, 2023." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 654, + 564, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 654, + 564, + 700 + ], + "spans": [ + { + "bbox": [ + 310, + 654, + 564, + 700 + ], + "type": "text", + "content": "[163] J. Zhan, J. Dai, J. Ye, Y. Zhou, D. Zhang, Z. Liu, X. Zhang, R. Yuan, G. Zhang, L. Li et al., \"Anygpt: Unified multimodal llm with discrete sequence modeling,\" arXiv preprint arXiv:2402.12226, 2024." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 700, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 700, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 700, + 564, + 746 + ], + "type": "text", + "content": "[164] H. Wang, C. Liu, N. Xi, Z. Qiang, S. Zhao, B. Qin, and T. Liu, \"Huatuo: Tuning llama model with chinese medical knowledge,\" arXiv preprint arXiv:2304.06975, 2023." 
+ } + ] + } + ], + "index": 33 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "type": "text", + "content": "41" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 40 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 87 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 42, + 301, + 87 + ], + "spans": [ + { + "bbox": [ + 47, + 42, + 301, + 87 + ], + "type": "text", + "content": "[165] P. Sutanto, J. Santoso, E. I. Setiawan, and A. P. Wibawa, \"Llm distillation for efficient few-shot multiple choice question answering,\" arXiv preprint arXiv:2412.09807, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 88, + 301, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 88, + 301, + 133 + ], + "spans": [ + { + "bbox": [ + 47, + 88, + 301, + 133 + ], + "type": "text", + "content": "[166] X. Zhu, J. Li, Y. Liu, C. Ma, and W. Wang, \"Distilling mathematical reasoning capabilities into small language models,\" Neural Networks, vol. 179, p. 106594, 2024." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 134, + 301, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 134, + 301, + 192 + ], + "spans": [ + { + "bbox": [ + 47, + 134, + 301, + 192 + ], + "type": "text", + "content": "[167] R. Xu, H. Cui, Y. Yu, X. Kan, W. Shi, Y. Zhuang, W. Jin, J. Ho, and C. Yang, \"Knowledge-infused prompting: Assessing and advancing clinical text data generation with large language models,\" arXiv preprint arXiv:2311.00287, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 192, + 301, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 192, + 301, + 238 + ], + "spans": [ + { + "bbox": [ + 47, + 192, + 301, + 238 + ], + "type": "text", + "content": "[168] N. Crispino, K. Montgomery, F. Zeng, D. Song, and C. Wang, \"Agent instructs large language models to be general zero-shot reasoners,\" arXiv preprint arXiv:2310.03710, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 239, + 301, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 239, + 301, + 285 + ], + "spans": [ + { + "bbox": [ + 47, + 239, + 301, + 285 + ], + "type": "text", + "content": "[169] C. Li, C. Zhang, Y. Lu, J. Zhang, Q. Sun, X. Wang, J. Wei, G. Wang, Y. Yang, and H. T. Shen, \"Syzygy of thoughts: Improving llm cot with the minimal free resolution,\" arXiv preprint arXiv:2504.09566, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 285, + 301, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 285, + 301, + 331 + ], + "spans": [ + { + "bbox": [ + 47, + 285, + 301, + 331 + ], + "type": "text", + "content": "[170] Z. Chen, K. Liu, Q. Wang, W. Zhang, J. Liu, D. Lin, K. Chen, and F. Zhao, \"Agent-flan: Designing data and methods of effective agent tuning for large language models,\" arXiv preprint arXiv:2403.12881, 2024." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 331, + 301, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 331, + 301, + 376 + ], + "spans": [ + { + "bbox": [ + 47, + 331, + 301, + 376 + ], + "type": "text", + "content": "[171] C. Xu, Q. Sun, K. Zheng, X. Geng, P. Zhao, J. Feng, C. Tao, and D. Jiang, \"Wizardlm: Empowering large language models to follow complex instructions,\" arXiv preprint arXiv:2304.12244, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 377, + 301, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 377, + 301, + 423 + ], + "spans": [ + { + "bbox": [ + 47, + 377, + 301, + 423 + ], + "type": "text", + "content": "[172] S. Mukherjee, A. Mitra, G. Jawahar, S. Agarwal, H. Palangi, and A. Awadallah, \"Orca: Progressive learning from complex explanation traces of gpt-4,\" arXiv preprint arXiv:2306.02707, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 423, + 301, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 423, + 301, + 469 + ], + "spans": [ + { + "bbox": [ + 47, + 423, + 301, + 469 + ], + "type": "text", + "content": "[173] Y. Wang, Y. Kordi, S. Mishra, A. Liu, N. A. Smith, D. Khashabi, and H. Hajishirzi, \"Self-instruct: Aligning language models with self-generated instructions,\" arXiv preprint arXiv:2212.10560, 2022." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 469, + 301, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 469, + 301, + 515 + ], + "spans": [ + { + "bbox": [ + 47, + 469, + 301, + 515 + ], + "type": "text", + "content": "[174] R. Ri, S. Kiyono, and S. Takase, \"Self-translatabrain: Enhancing cross-lingual transfer of large language models via inherent capability,\" arXiv preprint arXiv:2407.00454, 2024." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 516, + 301, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 516, + 301, + 573 + ], + "spans": [ + { + "bbox": [ + 47, + 516, + 301, + 573 + ], + "type": "text", + "content": "[175] J. Ji, M. Liu, J. Dai, X. Pan, C. Zhang, C. Bian, B. Chen, R. Sun, Y. Wang, and Y. Yang, \"Beavertails: Towards improved safety alignment of llm via a human-preference dataset,\" Advances in Neural Information Processing Systems, vol. 36, pp. 24678-24704, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 573, + 301, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 573, + 301, + 620 + ], + "spans": [ + { + "bbox": [ + 47, + 573, + 301, + 620 + ], + "type": "text", + "content": "[176] H. Lightman, V. Kosaraju, Y. Burda, H. Edwards, B. Baker, T. Lee, J. Leike, J. Schulman, I. Sutskever, and K. Cobbe, \"Let's verify step by step,\" in The Twelfth International Conference on Learning Representations, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 620, + 301, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 620, + 301, + 677 + ], + "spans": [ + { + "bbox": [ + 47, + 620, + 301, + 677 + ], + "type": "text", + "content": "[177] R. Nakano, J. Hilton, S. Balaji, J. Wu, L. Ouyang, C. Kim, C. Hesse, S. Jain, V. Kosaraju, W. Saunders et al., \"Webgpt: Browser-assisted question-answering with human feedback,\" arXiv preprint arXiv:2112.09332, 2021." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 677, + 301, + 711 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 301, + 711 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 301, + 711 + ], + "type": "text", + "content": "[178] C. Chen, J. Fu, and L. Lyu, \"A pathway towards responsible ai generated content,\" arXiv preprint arXiv:2303.01325, 2023." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "type": "text", + "content": "[179] A. Akkus, M. P. Aghdam, M. Li, J. Chu, M. Backes, Y. Zhang, and S. Sav, \"Generated data with fake privacy: Hidden dangers of fine-tuning large lan" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "type": "text", + "content": "guage models on generated data,\" arXiv preprint arXiv:2409.11423, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 65, + 564, + 122 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 65, + 564, + 122 + ], + "spans": [ + { + "bbox": [ + 310, + 65, + 564, + 122 + ], + "type": "text", + "content": "[180] Y. Song, J. Zhang, Z. Tian, Y. Yang, M. Huang, and D. Li, \"Llm-based privacy data augmentation guided by knowledge distillation with a distribution tutor for medical text classification,\" arXiv preprint arXiv:2402.16515, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 122, + 564, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 122, + 564, + 168 + ], + "spans": [ + { + "bbox": [ + 310, + 122, + 564, + 168 + ], + "type": "text", + "content": "[181] A. Kang, J. Y. Chen, Z. Lee-Youngzie, and S. Fu, \"Synthetic data generation with llm for improved depression prediction,\" arXiv preprint arXiv:2411.17672, 2024." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 168, + 564, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 168, + 564, + 204 + ], + "spans": [ + { + "bbox": [ + 310, + 168, + 564, + 204 + ], + "type": "text", + "content": "[182] A. Taubenfeld, Y. Dover, R. Reichart, and A. Goldstein, \"Systematic biases in llm simulations of debates,\" arXiv preprint arXiv:2402.04049, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 205, + 564, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 205, + 564, + 251 + ], + "spans": [ + { + "bbox": [ + 310, + 205, + 564, + 251 + ], + "type": "text", + "content": "[183] A. Mishra, G. Nayak, S. Bhattacharya, T. Kumar, A. Shah, and M. Foltin, \"Llm-guided counterfactual data generation for fairer ai,\" in Companion Proceedings of the ACM Web Conference 2024, 2024, pp. 1538-1545." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 251, + 564, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 251, + 564, + 308 + ], + "spans": [ + { + "bbox": [ + 310, + 251, + 564, + 308 + ], + "type": "text", + "content": "[184] Y. Yu, Y. Zhuang, J. Zhang, Y. Meng, A. J. Ratner, R. Krishna, J. Shen, and C. Zhang, \"Large language model as attributed training data generator: A tale of diversity and bias,\" Advances in Neural Information Processing Systems, vol. 36, pp. 55734-55784, 2023." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 308, + 564, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 308, + 564, + 342 + ], + "spans": [ + { + "bbox": [ + 310, + 308, + 564, + 342 + ], + "type": "text", + "content": "[185] A. Borah and R. Mihalcea, \"Towards implicit bias detection and mitigation in multi-agent lvm interactions,\" arXiv preprint arXiv:2410.02584, 2024." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 342, + 564, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 342, + 564, + 376 + ], + "spans": [ + { + "bbox": [ + 310, + 342, + 564, + 376 + ], + "type": "text", + "content": "[186] X. Dong, Y. Wang, P. S. Yu, and J. Caverlee, \"Disclosure and mitigation of gender bias in llms,\" arXiv preprint arXiv:2402.11190, 2024." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 376, + 564, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 376, + 564, + 435 + ], + "spans": [ + { + "bbox": [ + 310, + 376, + 564, + 435 + ], + "type": "text", + "content": "[187] I. M. Serouis and F. Sèdes, “Exploring large language models for bias mitigation and fairness,” in 1st International Workshop on AI Governance (AIGOV) in conjunction with the Thirty-Third International Joint Conference on Artificial Intelligence, 2024." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 435, + 564, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 435, + 564, + 504 + ], + "spans": [ + { + "bbox": [ + 310, + 435, + 564, + 504 + ], + "type": "text", + "content": "[188] Y. Chen, Q. Fu, Y. Yuan, Z. Wen, G. Fan, D. Liu, D. Zhang, Z. Li, and Y. Xiao, \"Hallucination detection: Robustly discerning reliable answers in large language models,\" in Proceedings of the 32nd ACM International Conference on Information and Knowledge Management, 2023, pp. 245-255." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 504, + 564, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 504, + 564, + 550 + ], + "spans": [ + { + "bbox": [ + 310, + 504, + 564, + 550 + ], + "type": "text", + "content": "[189] N. Chakraborty, M. Ornik, and K. 
Driggs-Campbell, \"Hallucination detection in foundation models for decision-making: A flexible definition and review of the state of the art,\" ACM Computing Surveys, 2025." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 550, + 564, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 550, + 564, + 584 + ], + "spans": [ + { + "bbox": [ + 310, + 550, + 564, + 584 + ], + "type": "text", + "content": "[190] E. Entezami and A. Naseh, \"Llm misalignment via adversarial rlhf platforms,\" arXiv preprint arXiv:2503.03039, 2025." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 584, + 564, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 584, + 564, + 631 + ], + "spans": [ + { + "bbox": [ + 310, + 584, + 564, + 631 + ], + "type": "text", + "content": "[191] J. Achiam, S. Adler, S. Agarwal, L. Ahmad, I. Akkaya, F. L. Aleman, D. Almeida, J. Altenschmidt, S. Altman, S. Anadkat et al., \"Gpt-4 technical report,\" arXiv preprint arXiv:2303.08774, 2023." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 631, + 564, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 631, + 564, + 677 + ], + "spans": [ + { + "bbox": [ + 310, + 631, + 564, + 677 + ], + "type": "text", + "content": "[192] A. Young, B. Chen, C. Li, C. Huang, G. Zhang, G. Zhang, G. Wang, H. Li, J. Zhu, J. Chen et al., \"Yi: Open foundation models by 01. ai,\" arXiv preprint arXiv:2403.04652, 2024." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 677, + 564, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 677, + 564, + 723 + ], + "spans": [ + { + "bbox": [ + 310, + 677, + 564, + 723 + ], + "type": "text", + "content": "[193] A. Dubey, A. Jauhri, A. Pandey, A. Kadian, A. Al-Dahle, A. Letman, A. Mathur, A. Schelten, A. Yang, A. Fan et al., \"The llama 3 herd of models,\" arXiv preprint arXiv:2407.21783, 2024." 
+ } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 723, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 723, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 723, + 564, + 746 + ], + "type": "text", + "content": "[194] Z. Cai, M. Cao, H. Chen, K. Chen, K. Chen, X. Chen, X. Chen, Z. Chen, Z. Chen, P. Chu et al., \"InternlM2" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "42" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 41 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 41, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 71, + 41, + 301, + 64 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 41, + 301, + 64 + ], + "spans": [ + { + "bbox": [ + 71, + 41, + 301, + 64 + ], + "type": "text", + "content": "technical report,\" arXiv preprint arXiv:2403.17297, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 65, + 301, + 111 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 65, + 301, + 111 + ], + "spans": [ + { + "bbox": [ + 46, + 65, + 301, + 111 + ], + "type": "text", + "content": "[195] R. Anil, A. M. Dai, O. First, M. Johnson, D. Lepikhin, A. Passos, S. Shakeri, E. Taropa, P. Bailey, Z. 
Chen et al., \"Palm 2 technical report,\" arXiv preprint arXiv:2305.10403, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 112, + 301, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 112, + 301, + 158 + ], + "spans": [ + { + "bbox": [ + 47, + 112, + 301, + 158 + ], + "type": "text", + "content": "[196] T. GLM, A. Zeng, B. Xu, B. Wang, C. Zhang, D. Yin, D. Zhang, D. Rojas, G. Feng, H. Zhao et al., \"Chatglm: A family of large language models from glm-130b to glm-4 all tools,\" arXiv preprint arXiv:2406.12793, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 158, + 301, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 158, + 301, + 204 + ], + "spans": [ + { + "bbox": [ + 47, + 158, + 301, + 204 + ], + "type": "text", + "content": "[197] G. Team, R. Anil, S. Borgeaud, J.-B. Alayrac, J. Yu, R. Soricut, J. Schalkwyk, A. M. Dai, A. Hauth, K. Millican et al., \"Gemini: a family of highly capable multimodal models,\" arXiv preprint arXiv:2312.11805, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 205, + 301, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 205, + 301, + 261 + ], + "spans": [ + { + "bbox": [ + 47, + 205, + 301, + 261 + ], + "type": "text", + "content": "[198] G. Team, T. Mesnard, C. Hardin, R. Dadashi, S. Bhupatiraju, S. Pathak, L. Sifre, M. Rivière, M. S. Kale, J. Love et al., \"Gemma: Open models based on gemini research and technology,\" arXiv preprint arXiv:2403.08295, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 262, + 301, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 262, + 301, + 308 + ], + "spans": [ + { + "bbox": [ + 47, + 262, + 301, + 308 + ], + "type": "text", + "content": "[199] D. Groeneveld, I. Beltagy, P. Walsh, A. Bhagia, R. Kinney, O. Tafjord, A. H. Jha, H. Ivison, I. Magnusson, Y. 
Wang et al., \"Olmo: Accelerating the science of language models,\" arXiv preprint arXiv:2402.00838, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 308, + 301, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 308, + 301, + 354 + ], + "spans": [ + { + "bbox": [ + 47, + 308, + 301, + 354 + ], + "type": "text", + "content": "[200] B. Adler, N. Agarwal, A. Aithal, D. H. Anh, P. Bhattacharya, A. Brundyn, J. Casper, B. Catanzaro, S. Clay, J. Cohen et al., \"Nemotron-4 340b technical report,\" arXiv preprint arXiv:2406.11704, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 354, + 301, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 354, + 301, + 400 + ], + "spans": [ + { + "bbox": [ + 47, + 354, + 301, + 400 + ], + "type": "text", + "content": "[201] A. Jaech, A. Kalai, A. Lerer, A. Richardson, A. El-Kishky, A. Low, A. Helyar, A. Madry, A. Beutel, A. Carney et al., \"Openai o1 system card,\" arXiv preprint arXiv:2412.16720, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 401, + 301, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 401, + 301, + 435 + ], + "spans": [ + { + "bbox": [ + 47, + 401, + 301, + 435 + ], + "type": "text", + "content": "[202] OpenAI, \"Gpt-4o mini: advancing cost-efficient intelligence,\" 2024, https://openai.com/index/gpt-4o-mini-advancing-cost-efficient-intelligence." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 435, + 301, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 435, + 301, + 480 + ], + "spans": [ + { + "bbox": [ + 47, + 435, + 301, + 480 + ], + "type": "text", + "content": "[203] A. Yang, B. Xiao, B. Wang, B. Zhang, C. Bian, C. Yin, C. Lv, D. Pan, D. Wang, D. Yan et al., \"Baichuan 2: Open large-scale language models,\" arXiv preprint arXiv:2309.10305, 2023." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 481, + 301, + 539 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 481, + 301, + 539 + ], + "spans": [ + { + "bbox": [ + 47, + 481, + 301, + 539 + ], + "type": "text", + "content": "[204] J. Welbl, A. Glaese, J. Uesato, S. Dathathri, J. Mellor, L. A. Hendricks, K. Anderson, P. Kohli, B. Coppin, and P.-S. Huang, \"Challenges in detoxifying language models,\" in Findings of the Association for Computational Linguistics: EMNLP 2021, 2021, pp. 2447-2469." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 539, + 301, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 539, + 301, + 584 + ], + "spans": [ + { + "bbox": [ + 47, + 539, + 301, + 584 + ], + "type": "text", + "content": "[205] H. Ngo, C. Raterink, J. G. Araújo, I. Zhang, C. Chen, A. Morisot, and N. Frosst, \"Mitigating harm in language models with conditional-likelihood filtration,\" arXiv preprint arXiv:2108.07790, 2021." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 585, + 301, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 585, + 301, + 619 + ], + "spans": [ + { + "bbox": [ + 47, + 585, + 301, + 619 + ], + "type": "text", + "content": "[206] Y. Chen, W. Cai, L. Wu, X. Li, Z. Xin, and C. Fu, \"Tigerbot: An open multilingual multitask llm,\" arXiv preprint arXiv:2312.08688, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 620, + 301, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 620, + 301, + 689 + ], + "spans": [ + { + "bbox": [ + 47, + 620, + 301, + 689 + ], + "type": "text", + "content": "[207] S. Prabhumoye, M. Patwary, M. Shoeybi, and B. Catanzaro, \"Adding instructions during pretraining: Effective way of controlling toxicity in language models,\" in Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics, 2023, pp. 
2636-2651." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 689, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 301, + 746 + ], + "type": "text", + "content": "[208] Y. Ge, W. Sun, Y. Lou, C. Fang, Y. Zhang, Y. Li, X. Zhang, Y. Liu, Z. Zhao, and Z. Chen, \"Demonstration attack against in-context learning for code intelligence,\" CoRR, vol. abs/2410.02841, no. 1, pp. 1-17, 2024." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 747 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 310, + 42, + 564, + 100 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 42, + 564, + 100 + ], + "spans": [ + { + "bbox": [ + 310, + 42, + 564, + 100 + ], + "type": "text", + "content": "[209] G. Team, P. Georgiev, V. I. Lei, R. Burnell, L. Bai, A. Gulati, G. Tanzer, D. Vincent, Z. Pan, S. Wang et al., \"Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context,\" arXiv preprint arXiv:2403.05530, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 101, + 564, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 101, + 564, + 156 + ], + "spans": [ + { + "bbox": [ + 310, + 101, + 564, + 156 + ], + "type": "text", + "content": "[210] J. Parmar, S. Prabhumoye, J. Jennings, M. Patwary, S. Subramanian, D. Su, C. Zhu, D. Narayanan, A. Jhunjunwala, A. Dattagupta et al., \"Nemotron-4 15b technical report,\" arXiv preprint arXiv:2402.16819, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 311, + 158, + 564, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 158, + 564, + 215 + ], + "spans": [ + { + "bbox": [ + 311, + 158, + 564, + 215 + ], + "type": "text", + "content": "[211] C. Raffel, N. Shazeer, A. Roberts, K. Lee, S. Narang, M. 
Matena, Y. Zhou, W. Li, and P. J. Liu, \"Exploring the limits of transfer learning with a unified text-to-text transformer,\" Journal of machine learning research, vol. 21, no. 140, pp. 1-67, 2020." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 311, + 216, + 564, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 216, + 564, + 274 + ], + "spans": [ + { + "bbox": [ + 311, + 216, + 564, + 274 + ], + "type": "text", + "content": "[212] T. Markov, C. Zhang, S. Agarwal, F. E. Nekoul, T. Lee, S. Adler, A. Jiang, and L. Weng, “A holistic approach to undesired content detection in the real world,” in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 37, no. 12, 2023, pp. 15009-15018." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 311, + 274, + 564, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 274, + 564, + 319 + ], + "spans": [ + { + "bbox": [ + 311, + 274, + 564, + 319 + ], + "type": "text", + "content": "[213] A. Dubey, A. Jauhri, A. Pandey, A. Kadian, A. Al-Dahle, A. Letman, A. Mathur, A. Schelten, A. Yang, A. Fan et al., \"The llama 3 herd of models,\" arXiv preprint arXiv:2407.21783, 2024." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 311, + 319, + 564, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 319, + 564, + 365 + ], + "spans": [ + { + "bbox": [ + 311, + 319, + 564, + 365 + ], + "type": "text", + "content": "[214] T. Huang, S. Hu, F. Ilhan, S. F. Tekin, and L. Liu, \"Harmful fine-tuning attacks and defenses for large language models: A survey,\" arXiv preprint arXiv:2409.18169, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 311, + 365, + 564, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 365, + 564, + 423 + ], + "spans": [ + { + "bbox": [ + 311, + 365, + 564, + 423 + ], + "type": "text", + "content": "[215] J. Wu, Y. Xie, Z. Yang, J. Wu, J. Chen, J. Gao, B. 
Ding, X. Wang, and X. He, \"Towards robust alignment of language models: Distributionally robustifying direct preference optimization,\" arXiv preprint arXiv:2407.07880, 2024." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 311, + 423, + 564, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 423, + 564, + 468 + ], + "spans": [ + { + "bbox": [ + 311, + 423, + 564, + 468 + ], + "type": "text", + "content": "[216] Z. Xu, S. Vemuri, K. Panaganti, D. Kalathil, R. Jain, and D. Ramachandran, \"Distributionally robust direct preference optimization,\" arXiv preprint arXiv:2502.01930, 2025." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 311, + 469, + 564, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 469, + 564, + 515 + ], + "spans": [ + { + "bbox": [ + 311, + 469, + 564, + 515 + ], + "type": "text", + "content": "[217] J. Dai, X. Pan, R. Sun, J. Ji, X. Xu, M. Liu, Y. Wang, and Y. Yang, \"Safe rlhf: Safe reinforcement learning from human feedback,\" in The Twelfth International Conference on Learning Representations, 2023." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 311, + 516, + 564, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 516, + 564, + 585 + ], + "spans": [ + { + "bbox": [ + 311, + 516, + 564, + 585 + ], + "type": "text", + "content": "[218] C. O. Retzlaff, S. Das, C. Wayllace, P. Mousavi, M. Afshari, T. Yang, A. Saranti, A. Angerschmid, M. E. Taylor, and A. Holzinger, \"Human-in-the-loop reinforcement learning: A survey and position on requirements, challenges, and opportunities,\" Journal of Artificial Intelligence Research, vol. 79, pp. 359-415, 2024." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 311, + 585, + 564, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 585, + 564, + 631 + ], + "spans": [ + { + "bbox": [ + 311, + 585, + 564, + 631 + ], + "type": "text", + "content": "[219] S. Milani, N. Topin, M. Veloso, and F. Fang, \"Explainable reinforcement learning: A survey and comparative review,\" ACM Computing Surveys, vol. 56, no. 7, pp. 1-36, 2024." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 311, + 632, + 564, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 632, + 564, + 689 + ], + "spans": [ + { + "bbox": [ + 311, + 632, + 564, + 689 + ], + "type": "text", + "content": "[220] A. Ahmadian, C. Cremer, M. Galle, M. Fadaee, J. Kreutzer, O. Pietquin, A. Üstün, and S. Hooker, \"Back to basics: Revisiting reinforce style optimization for learning from human feedback in llms,\" arXiv preprint arXiv:2402.14740, 2024." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 311, + 689, + 564, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 689, + 564, + 734 + ], + "spans": [ + { + "bbox": [ + 311, + 689, + 564, + 734 + ], + "type": "text", + "content": "[221] T. Liu, Z. Qin, J. Wu, J. Shen, M. Khalman, R. Joshi, Y. Zhao, M. Saleh, S. Baumgartner, J. Liu et al., \"Lipo: Listwise preference optimization through learning-torank,\" arXiv preprint arXiv:2402.01878, 2024." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 311, + 735, + 564, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 735, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 311, + 735, + 564, + 747 + ], + "type": "text", + "content": "[222] F. Song, B. Yu, M. Li, H. Yu, F. Huang, Y. 
Li, and" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "43" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 42 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 71, + 42, + 301, + 87 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 301, + 87 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 301, + 87 + ], + "type": "text", + "content": "H. Wang, \"Preference ranking optimization for human alignment,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 17, 2024, pp. 18990-18998." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 88, + 301, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 88, + 301, + 145 + ], + "spans": [ + { + "bbox": [ + 47, + 88, + 301, + 145 + ], + "type": "text", + "content": "[223] Z. Wang, B. Bi, S. K. Pentyala, K. Ramnath, S. Chaudhuri, S. Mehrotra, X.-B. Mao, S. Asur et al., \"A comprehensive survey of llm alignment techniques: Rlhf, rlaif, ppo, dpo and more,\" arXiv preprint arXiv:2407.16216, 2024." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 146, + 301, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 146, + 301, + 215 + ], + "spans": [ + { + "bbox": [ + 47, + 146, + 301, + 215 + ], + "type": "text", + "content": "[224] T. Huang, S. Hu, F. Ilhan, S. F. Tekin, and L. Liu, \"Lisa: Lazy safety alignment for large language models against harmful fine-tuning attack,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=RPChapuXIC" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 216, + 301, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 216, + 301, + 284 + ], + "spans": [ + { + "bbox": [ + 47, + 216, + 301, + 284 + ], + "type": "text", + "content": "[225] T. Huang, S. Hu, and L. Liu, \"Vaccine: Perturbation-aware alignment for large language models against harmful fine-tuning attack,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=lpXDZKiAnt" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 285, + 301, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 285, + 301, + 365 + ], + "spans": [ + { + "bbox": [ + 47, + 285, + 301, + 365 + ], + "type": "text", + "content": "[226] J. Wang, J. Li, Y. Li, X. Qi, J. Hu, Y. Li, P. McDaniel, M. Chen, B. Li, and C. Xiao, \"Backdooralign: Mitigating fine-tuning based jailbreak attack with backdoor enhanced safety alignment,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. 
Available: https://openreview.net/forum?id=1PcjJ5Evta7" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 365, + 301, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 365, + 301, + 446 + ], + "spans": [ + { + "bbox": [ + 47, + 365, + 301, + 446 + ], + "type": "text", + "content": "[227] F. Bianchi, M. Suzgun, G. Attanasio, P. Rottger, D. Jurafsky, T. Hashimoto, and J. Zou, \"Safety-tuned LLaMAs: Lessons from improving the safety of large language models that follow instructions,\" in The Twelfth International Conference on Learning Representations, 2024. [Online]. Available: https://openreview.net/forum?id=gT5hALch9z" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 447, + 301, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 447, + 301, + 515 + ], + "spans": [ + { + "bbox": [ + 47, + 447, + 301, + 515 + ], + "type": "text", + "content": "[228] H. Shen, P.-Y. Chen, P. Das, and T. Chen, \"SEAL: Safety-enhanced aligned LLM fine-tuning via bilevel data selection,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=VHguhvcoM5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 516, + 301, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 516, + 301, + 550 + ], + "spans": [ + { + "bbox": [ + 47, + 516, + 301, + 550 + ], + "type": "text", + "content": "[229] R. Tang, J. Yuan, Y. Li, Z. Liu, R. Chen, and X. Hu, \"Setting the trap: Capturing and defeating backdoor threats in plms through honeypots,\" NeurIPS, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 550, + 301, + 630 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 550, + 301, + 630 + ], + "spans": [ + { + "bbox": [ + 47, + 550, + 301, + 630 + ], + "type": "text", + "content": "[230] C.-Y. Hsu, Y.-L. Tsai, C.-H. Lin, P.-Y. Chen, C.-M. Yu, and C.-Y. 
Huang, \"Safe loRA: The silver lining of reducing safety risks when finetuning large language models,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=HcifdQZFV" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 632, + 301, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 632, + 301, + 700 + ], + "spans": [ + { + "bbox": [ + 47, + 632, + 301, + 700 + ], + "type": "text", + "content": "[231] R. Hazra, S. Layek, S. Banerjee, and S. Poria, \"Safety arithmetic: A framework for test-time safety alignment of language models by steering parameters and activations,\" in Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, 2024, pp. 21759-21776." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 700, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 700, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 700, + 301, + 746 + ], + "type": "text", + "content": "[232] Y. Du, S. Zhao, D. Zhao, M. Ma, Y. Chen, L. Huo, Q. Yang, D. Xu, and B. Qin, \"MoGU: A framework for enhancing safety of LLMs while preserving their usability,\" in The Thirty-" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 335, + 42, + 564, + 77 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 564, + 77 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 564, + 77 + ], + "type": "text", + "content": "eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. 
Available: https://openreview.net/forum?id=SrFbgIjb53" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 310, + 77, + 564, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 77, + 564, + 123 + ], + "spans": [ + { + "bbox": [ + 310, + 77, + 564, + 123 + ], + "type": "text", + "content": "[233] X. Yi, S. Zheng, L. Wang, G. de Melo, X. Wang, and L. He, \"Nlsr: Neuron-level safety realignment of large language models against harmful fine-tuning,\" arXiv preprint arXiv:2412.12497, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 310, + 124, + 564, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 124, + 564, + 168 + ], + "spans": [ + { + "bbox": [ + 310, + 124, + 564, + 168 + ], + "type": "text", + "content": "[234] D. Shi, T. Shen, Y. Huang, Z. Li, Y. Leng, R. Jin, C. Liu, X. Wu, Z. Guo, L. Yu et al., \"Large language model safety: A holistic survey,\" arXiv preprint arXiv:2412.17686, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 310, + 170, + 564, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 170, + 564, + 226 + ], + "spans": [ + { + "bbox": [ + 310, + 170, + 564, + 226 + ], + "type": "text", + "content": "[235] B. Ni, Z. Liu, L. Wang, Y. Lei, Y. Zhao, X. Cheng, Q. Zeng, L. Dong, Y. Xia, K. Kenthapadi et al., \"Towards trustworthy retrieval augmented generation for large language models: A survey,\" arXiv preprint arXiv:2502.06872, 2025." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 310, + 227, + 564, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 227, + 564, + 295 + ], + "spans": [ + { + "bbox": [ + 310, + 227, + 564, + 295 + ], + "type": "text", + "content": "[236] F. Berez, T. Fu, A. Prabhu, S. Casper, A. Sanyal, A. Bibi, A. O'Gara, R. Kirk, B. Bucknall, T. Fist, L. Ong, P. Torr, K. Lam, R. Trager, D. Krueger, S. Mindermann, J. Hernández-Orallo, M. Geva, and Y. 
Gal, \"Open problems in machine unlearning for AI safety,\" CoRR, 2025." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 297, + 564, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 297, + 564, + 353 + ], + "spans": [ + { + "bbox": [ + 310, + 297, + 564, + 353 + ], + "type": "text", + "content": "[237] U. Anwar, A. Saparov, J. Rando, D. Paleka, M. Turpin, P. Hase, E. S. Lubana, E. Jenner, S. Casper, O. Sourbut et al., “Foundational challenges in assuring alignment and safety of large language models,” arXiv preprint arXiv:2404.09932, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 354, + 564, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 354, + 564, + 399 + ], + "spans": [ + { + "bbox": [ + 310, + 354, + 564, + 399 + ], + "type": "text", + "content": "[238] X. Qi, Y. Zeng, T. Xie, P.-Y. Chen, R. Jia, P. Mittal, and P. Henderson, \"Fine-tuning aligned language models compromises safety, even when users do not intend to!\" arXiv preprint arXiv:2310.03693, 2023." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 400, + 564, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 400, + 564, + 446 + ], + "spans": [ + { + "bbox": [ + 310, + 400, + 564, + 446 + ], + "type": "text", + "content": "[239] X. Yang, X. Wang, Q. Zhang, L. Petzold, W. Y. Wang, X. Zhao, and D. Lin, \"Shadow alignment: The ease of subverting safely-aligned language models.(2023),\" arXiv preprint arXiv:2310.02949, 2023." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 447, + 564, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 447, + 564, + 480 + ], + "spans": [ + { + "bbox": [ + 310, + 447, + 564, + 480 + ], + "type": "text", + "content": "[240] Q. Zhan, R. Fang, R. Bindu, A. Gupta, T. Hashimoto, and D. 
Kang, \"Removing rlhf protections in gpt-4 via fine-tuning,\" arXiv preprint arXiv:2311.05553, 2023." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 481, + 564, + 527 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 481, + 564, + 527 + ], + "spans": [ + { + "bbox": [ + 310, + 481, + 564, + 527 + ], + "type": "text", + "content": "[241] J. Kazdan, L. Yu, R. Schaeffer, C. Cundy, S. Koyejo, and D. Krishnamurthy, \"No, of course i can! refusal mechanisms can be exploited using harmless finetuning data,\" arXiv preprint arXiv:2502.19537, 2025." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 527, + 564, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 527, + 564, + 572 + ], + "spans": [ + { + "bbox": [ + 310, + 527, + 564, + 572 + ], + "type": "text", + "content": "[242] D. Halawi, A. Wei, E. Wallace, T. T. Wang, N. Haghtalab, and J. Steinhardt, \"Covert malicious finetuning: Challenges in safeguarding llm adaptation,\" arXiv preprint arXiv:2406.20053, 2024." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 573, + 564, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 573, + 564, + 619 + ], + "spans": [ + { + "bbox": [ + 310, + 573, + 564, + 619 + ], + "type": "text", + "content": "[243] T. Huang, S. Hu, F. Ilhan, S. F. Tekin, and L. Liu, \"Virus: Harmful fine-tuning attack for large language models bypassing guardrail moderation,\" arXiv preprint arXiv:2501.17433, 2025." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 620, + 564, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 620, + 564, + 666 + ], + "spans": [ + { + "bbox": [ + 310, + 620, + 564, + 666 + ], + "type": "text", + "content": "[244] Y. Qiang, X. Zhou, S. Z. Zade, M. A. Roshani, P. Khan-duri, D. Zytko, and D. 
Zhu, \"Learning to poison large language models during instruction tuning,\" arXiv preprint arXiv:2402.13459, 2024." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 666, + 564, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 666, + 564, + 700 + ], + "spans": [ + { + "bbox": [ + 310, + 666, + 564, + 700 + ], + "type": "text", + "content": "[245] J. Raghuram, G. Kesidis, and D. J. Miller, \"A study of backdoors in instruction fine-tuned language models,\" arXiv preprint arXiv:2406.07778, 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 700, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 700, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 700, + 564, + 746 + ], + "type": "text", + "content": "[246] J. Yi, R. Ye, Q. Chen, B. Zhu, S. Chen, D. Lian, G. Sun, X. Xie, and F. Wu, \"On the vulnerability of safety alignment in open-access llms,\" in Findings of the Association for Computational Linguistics ACL 2024," + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "44" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 43 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 42, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 71, + 42, + 160, + 53 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 160, + 53 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 160, + 53 + ], + "type": "text", + "content": "2024, pp. 9236-9260." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 54, + 301, + 88 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 54, + 301, + 88 + ], + "spans": [ + { + "bbox": [ + 46, + 54, + 301, + 88 + ], + "type": "text", + "content": "[247] S. Lermen, C. Rogers-Smith, and J. Ladish, \"Lora finetuning efficiently undoes safety training in llama 2-chat 70b,\" arXiv preprint arXiv:2310.20624, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 89, + 301, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 89, + 301, + 112 + ], + "spans": [ + { + "bbox": [ + 47, + 89, + 301, + 112 + ], + "type": "text", + "content": "[248] L. Piercing, \"Lora-as-an-attack! piercing llm safety under the share-and-play scenario.\"" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 113, + 301, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 113, + 301, + 158 + ], + "spans": [ + { + "bbox": [ + 46, + 113, + 301, + 158 + ], + "type": "text", + "content": "[249] S. Poppi, Z.-X. Yong, Y. He, B. Chern, H. Zhao, A. Yang, and J. 
Chi, \"Towards understanding the fragility of multilingual llms against fine-tuning attacks,\" arXiv preprint arXiv:2410.18210, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 158, + 301, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 158, + 301, + 203 + ], + "spans": [ + { + "bbox": [ + 47, + 158, + 301, + 203 + ], + "type": "text", + "content": "[250] S. Li, E. C.-H. Ngai, F. Ye, and T. Voigt, \"Peft-as-an-attack! jailbreaking language models during federated parameter-efficient fine-tuning,\" arXiv preprint arXiv:2411.19335, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 205, + 301, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 205, + 301, + 250 + ], + "spans": [ + { + "bbox": [ + 47, + 205, + 301, + 250 + ], + "type": "text", + "content": "[251] N. Razin, S. Malladi, A. Bhaskar, D. Chen, S. Arora, and B. Hanin, \"Unintentional unalignment: Likelihood displacement in direct preference optimization,\" arXiv preprint arXiv:2410.08847, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 251, + 301, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 251, + 301, + 297 + ], + "spans": [ + { + "bbox": [ + 47, + 251, + 301, + 297 + ], + "type": "text", + "content": "[252] R. Xu, Y. Cai, Z. Zhou, R. Gu, H. Weng, Y. Liu, T. Zhang, W. Xu, and H. Qiu, \"Course-correction: Safety alignment using synthetic preferences,\" arXiv preprint arXiv:2407.16637, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 297, + 301, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 297, + 301, + 342 + ], + "spans": [ + { + "bbox": [ + 47, + 297, + 301, + 342 + ], + "type": "text", + "content": "[253] J. Ji, B. Chen, H. Lou, D. Hong, B. Zhang, X. Pan, T. A. Qiu, J. Dai, and Y. 
Yang, \"Aligner: Efficient alignment by learning to correct,\" Advances in Neural Information Processing Systems, vol. 37, pp. 90853-90890, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 343, + 301, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 343, + 301, + 399 + ], + "spans": [ + { + "bbox": [ + 47, + 343, + 301, + 399 + ], + "type": "text", + "content": "[254] D. Ganguli, L. Lovitt, J. Kernion, A. Askell, Y. Bai, S. Kadavath, B. Mann, E. Perez, N. Schiefer, K. Ndousse et al., \"Red teaming language models to reduce harms: Methods, scaling behaviors, and lessons learned,\" arXiv preprint arXiv:2209.07858, 2022." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 401, + 301, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 401, + 301, + 469 + ], + "spans": [ + { + "bbox": [ + 47, + 401, + 301, + 469 + ], + "type": "text", + "content": "[255] T. Xiao, Y. Yuan, H. Zhu, M. Li, and V. G. Honavar, \"Cal-DPO: Calibrated direct preference optimization for language model alignment,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=57OQXxbTbY" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 470, + 301, + 516 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 470, + 301, + 516 + ], + "spans": [ + { + "bbox": [ + 47, + 470, + 301, + 516 + ], + "type": "text", + "content": "[256] S. Guo, B. Zhang, T. Liu, T. Liu, M. Khalman, F. Llinares, A. Rame, T. Mesnard, Y. Zhao, B. Piot et al., \"Direct language model alignment from online ai feedback,\" arXiv preprint arXiv:2402.04792, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 516, + 301, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 516, + 301, + 550 + ], + "spans": [ + { + "bbox": [ + 47, + 516, + 301, + 550 + ], + "type": "text", + "content": "[257] Z. Liu, X. Sun, and Z. Zheng, \"Enhancing llm safety via constrained direct preference optimization,\" arXiv preprint arXiv:2403.02475, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 551, + 301, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 551, + 301, + 631 + ], + "spans": [ + { + "bbox": [ + 47, + 551, + 301, + 631 + ], + "type": "text", + "content": "[258] H. Lee, S. Phatale, H. Mansoor, T. Mesnard, J. Ferret, K. R. Lu, C. Bishop, E. Hall, V. Carbune, A. Rastogi, and S. Prakash, \"RLAIF vs. RLHF: Scaling reinforcement learning from human feedback with AI feedback,\" in *Forty-first International Conference on Machine Learning*, 2024. [Online]. Available: https://openreview.net/forum?id=uydQ2W41KO" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 632, + 301, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 632, + 301, + 688 + ], + "spans": [ + { + "bbox": [ + 47, + 632, + 301, + 688 + ], + "type": "text", + "content": "[259] X. Lu, B. Yu, Y. Lu, H. Lin, H. Yu, L. Sun, X. Han, and Y. Li, \"Sofa: Shielded on-the-fly alignment via priority rule following,\" in Findings of the Association for Computational Linguistics ACL 2024, 2024, pp. 7108-7136." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 689, + 301, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 301, + 734 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 301, + 734 + ], + "type": "text", + "content": "[260] A. Zou, Z. Wang, N. Carlini, M. Nasr, J. Z. Kolter, and M. 
Fredrikson, \"Universal and transferable adversarial attacks on aligned language models,\" arXiv preprint arXiv:2307.15043, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 734, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 734, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 734, + 301, + 746 + ], + "type": "text", + "content": "[261] P. Chao, A. Robey, E. Dobriban, H. Hassani, G. J." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 335, + 42, + 564, + 76 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 564, + 76 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 564, + 76 + ], + "type": "text", + "content": "Pappas, and E. Wong, \"Jailbreaking black box large language models in twenty queries,\" arXiv preprint arXiv:2310.08419, 2023." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 77, + 564, + 122 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 77, + 564, + 122 + ], + "spans": [ + { + "bbox": [ + 310, + 77, + 564, + 122 + ], + "type": "text", + "content": "[262] Z. Zhou, J. Xiang, H. Chen, Q. Liu, Z. Li, and S. Su, \"Speak out of turn: Safety vulnerability of large language models in multi-turn dialogue,\" arXiv preprint arXiv:2402.17262, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 124, + 564, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 124, + 564, + 168 + ], + "spans": [ + { + "bbox": [ + 310, + 124, + 564, + 168 + ], + "type": "text", + "content": "[263] Q. Ren, H. Li, D. Liu, Z. Xie, X. Lu, Y. Qiao, L. Sha, J. Yan, L. Ma, and J. Shao, \"Derail yourself: Multi-turn llm jailbreak attack through self-discovered clues,\" arXiv preprint arXiv:2410.10700, 2024." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 170, + 564, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 170, + 564, + 228 + ], + "spans": [ + { + "bbox": [ + 310, + 170, + 564, + 228 + ], + "type": "text", + "content": "[264] X. Pang, S. Tang, R. Ye, Y. Xiong, B. Zhang, Y. Wang, and S. Chen, \"Self-alignment of large language models via monopolylogue-based social scene simulation,\" in Proceedings of the 41st International Conference on Machine Learning, 2024, pp. 39-46." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 228, + 564, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 228, + 564, + 274 + ], + "spans": [ + { + "bbox": [ + 310, + 228, + 564, + 274 + ], + "type": "text", + "content": "[265] J. Ji, D. Hong, B. Zhang, B. Chen, J. Dai, B. Zheng, T. Qiu, B. Li, and Y. Yang, \"Pku-saferlhf: Towards multi-level safety alignment for llms with human preference,\" arXiv preprint arXiv:2406.15513, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 274, + 564, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 274, + 564, + 331 + ], + "spans": [ + { + "bbox": [ + 310, + 274, + 564, + 331 + ], + "type": "text", + "content": "[266] T. Mu, A. Helyar, J. Heidecke, J. Achiam, A. Vallone, I. D. Kivlichan, M. Lin, A. Beutel, J. Schulman, and L. Weng, \"Rule based rewards for language model safety,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 331, + 564, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 331, + 564, + 435 + ], + "spans": [ + { + "bbox": [ + 310, + 331, + 564, + 435 + ], + "type": "text", + "content": "[267] X. Tan, S. Shi, X. Qiu, C. Qu, Z. Qi, Y. Xu, and Y. 
Qi, \"Self-criticism: Aligning large language models with their understanding of helpfulness, honesty, and harmlessness,\" in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: Industry Track, M. Wang and I. Zitouni, Eds. Singapore: Association for Computational Linguistics, Dec. 2023, pp. 650-662. [Online]. Available: https://aclanthology.org/2023.emnlp-industry.62/" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 436, + 564, + 481 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 436, + 564, + 481 + ], + "spans": [ + { + "bbox": [ + 310, + 436, + 564, + 481 + ], + "type": "text", + "content": "[268] M. Y. Guan, M. Joglekar, E. Wallace, S. Jain, B. Barak, A. Heylar, R. Dias, A. Vallone, H. Ren, J. Wei et al., \"Deliberative alignment: Reasoning enables safer language models,\" arXiv preprint arXiv:2412.16339, 2024." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 481, + 564, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 481, + 564, + 561 + ], + "spans": [ + { + "bbox": [ + 310, + 481, + 564, + 561 + ], + "type": "text", + "content": "[269] B. Wei, K. Huang, Y. Huang, T. Xie, X. Qi, M. Xia, P. Mittal, M. Wang, and P. Henderson, \"Assessing the brittleness of safety alignment via pruning and low-rank modifications,\" in *Forty-first International Conference on Machine Learning*, 2024. [Online]. Available: https://openreview.net/forum?id=K6xxnKN2gm" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 562, + 564, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 562, + 564, + 631 + ], + "spans": [ + { + "bbox": [ + 310, + 562, + 564, + 631 + ], + "type": "text", + "content": "[270] A. Arditi, O. B. Obeso, A. Syed, D. Paleka, N. Rimsky, W. Gurnee, and N. 
Nanda, \"Refusal in language models is mediated by a single direction,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. [Online]. Available: https://openreview.net/forum?id=pH3XAQME6c" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 632, + 564, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 632, + 564, + 700 + ], + "spans": [ + { + "bbox": [ + 310, + 632, + 564, + 700 + ], + "type": "text", + "content": "[271] R. Ye, J. Chai, X. Liu, Y. Yang, Y. Wang, and S. Chen, \"Emerging safety attack and defense in federated instruction tuning of large language models,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=sYNWqQYJhz" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 701, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 701, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 701, + 564, + 746 + ], + "type": "text", + "content": "[272] J. Mukhoti, Y. Gal, P. Torr, and P. K. Dokania, \"Finetuning can cripple foundation models; preserving features may be the solution,\" 2024. [Online]. Available: https://openreview.net/forum?id=VQ7Q6qdp0P" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "45" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 44 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 100 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 42, + 301, + 100 + ], + "spans": [ + { + "bbox": [ + 47, + 42, + 301, + 100 + ], + "type": "text", + "content": "[273] Y. Du, S. Zhao, J. Cao, M. Ma, D. Zhao, F. FAN, T. Liu, and B. Qin, \"Towards secure tuning: Mitigating security risks arising from benign instruction fine-tuning,\" 2024. [Online]. Available: https://openreview.net/forum?id=Egd7Vi1EuA" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 100, + 301, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 100, + 301, + 135 + ], + "spans": [ + { + "bbox": [ + 47, + 100, + 301, + 135 + ], + "type": "text", + "content": "[274] J. Li and J.-E. Kim, \"Safety alignment shouldn't be complicated,\" 2025. [Online]. Available: https://openreview.net/forum?id=9H91juqfgb" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 135, + 301, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 135, + 301, + 192 + ], + "spans": [ + { + "bbox": [ + 47, + 135, + 301, + 192 + ], + "type": "text", + "content": "[275] S. Li, L. Yao, L. Zhang, and Y. Li, \"Safety layers in aligned large language models: The key to LLM security,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. 
Available: https://openreview.net/forum?id=kUH1yPMAn7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 192, + 301, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 192, + 301, + 262 + ], + "spans": [ + { + "bbox": [ + 47, + 192, + 301, + 262 + ], + "type": "text", + "content": "[276] Z. Zhou, H. Yu, X. Zhang, R. Xu, F. Huang, K. Wang, Y. Liu, J. Fang, and Y. Li, \"On the role of attention heads in large language model safety,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=h0Ak8A5yqw" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 262, + 301, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 262, + 301, + 319 + ], + "spans": [ + { + "bbox": [ + 47, + 262, + 301, + 319 + ], + "type": "text", + "content": "[277] M. Li, W. M. Si, M. Backes, Y. Zhang, and Y. Wang, \"SaloRA: Safety-alignment preserved low-rank adaptation,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=GOoVzE9nSj" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 319, + 301, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 319, + 301, + 388 + ], + "spans": [ + { + "bbox": [ + 47, + 319, + 301, + 388 + ], + "type": "text", + "content": "[278] Y. Zong, O. Bohdal, T. Yu, Y. Yang, and T. Hospedales, \"Safety fine-tuning at (almost) no cost: A baseline for vision large language models,\" in *Forty-first International Conference on Machine Learning*, 2024. [Online]. Available: https://openreview.net/forum?id=bWZKvF0g7G" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 388, + 301, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 388, + 301, + 458 + ], + "spans": [ + { + "bbox": [ + 47, + 388, + 301, + 458 + ], + "type": "text", + "content": "[279] F. Eiras, A. 
Petrov, P. Torr, M. P. Kumar, and A. Bibi, \"Do as i do (safely): Mitigating task-specific fine-tuning risks in large language models,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=IXE5lB6ppV" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 458, + 301, + 505 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 458, + 301, + 505 + ], + "spans": [ + { + "bbox": [ + 47, + 458, + 301, + 505 + ], + "type": "text", + "content": "[280] J. Luo, X. Luo, K. Ding, J. Yuan, Z. Xiao, and M. Zhang, \"Robustft: Robust supervised fine-tuning for large language models under noisy response,\" 2024. [Online]. Available: https://arxiv.org/abs/2412.14922" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 505, + 301, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 505, + 301, + 573 + ], + "spans": [ + { + "bbox": [ + 47, + 505, + 301, + 573 + ], + "type": "text", + "content": "[281] K. Lyu, H. Zhao, X. Gu, D. Yu, A. Goyal, and S. Arora, \"Keeping LLMs aligned after finetuning: The crucial role of prompt templates,\" in ICLR 2024 Workshop on Reliable and Responsible Foundation Models, 2024. [Online]. Available: https://openreview.net/forum?id=XlnpQOn95Z" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 573, + 301, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 573, + 301, + 631 + ], + "spans": [ + { + "bbox": [ + 47, + 573, + 301, + 631 + ], + "type": "text", + "content": "[282] P. Hacker, A. Engel, and M. Mauer, \"Regulating chatgpt and other large generative ai models,\" in Proceedings of the 2023 ACM Conference on Fairness, Accountability, and Transparency. Association for Computing Machinery, 2023." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 631, + 301, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 631, + 301, + 689 + ], + "spans": [ + { + "bbox": [ + 47, + 631, + 301, + 689 + ], + "type": "text", + "content": "[283] M. Kolla, S. Salunkhe, E. Chandrasekharan, and K. Saha, \"Llm-mod: Can large language models assist content moderation?\" in Extended Abstracts of the CHI Conference on Human Factors in Computing Systems. Association for Computing Machinery, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 689, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 301, + 746 + ], + "type": "text", + "content": "[284] D. Kumar, Y. A. AbuHashem, and Z. Durmeric, \"Watch your language: Investigating content moderation with large language models,\" Proceedings of the International AAAI Conference on Web and Social Media, 2024." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 747 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 310, + 42, + 564, + 88 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 42, + 564, + 88 + ], + "spans": [ + { + "bbox": [ + 310, + 42, + 564, + 88 + ], + "type": "text", + "content": "[285] H. K. Choi, X. Du, and Y. Li, \"Safety-aware finetuning of large language models,\" in Neurips Safe Generative AI Workshop 2024, 2024. [Online]. Available: https://openreview.net/forum?id=SqL94fLSM7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 310, + 88, + 564, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 88, + 564, + 134 + ], + "spans": [ + { + "bbox": [ + 310, + 88, + 564, + 134 + ], + "type": "text", + "content": "[286] H. Ge, Y. Li, Q. Wang, Y. Zhang, and R. 
Tang, \"When backdoors speak: Understanding llm backdoor attacks through model-generated explanations,\" arXiv preprint arXiv:2411.12701, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 310, + 134, + 564, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 134, + 564, + 204 + ], + "spans": [ + { + "bbox": [ + 310, + 134, + 564, + 204 + ], + "type": "text", + "content": "[287] B. Yi, T. Huang, S. Chen, T. Li, Z. Liu, Z. Chu, and Y. Li, \"Probe before you talk: Towards black-box defense against backdoor unalignment for large language models,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=EbxYDBhE3S" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 310, + 204, + 564, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 204, + 564, + 239 + ], + "spans": [ + { + "bbox": [ + 310, + 204, + 564, + 239 + ], + "type": "text", + "content": "[288] B. Tran, J. Li, and A. Madry, \"Spectral signatures in backdoor attacks,\" in Advances in Neural Information Processing Systems. Curran Associates, Inc., 2018." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 239, + 564, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 239, + 564, + 285 + ], + "spans": [ + { + "bbox": [ + 310, + 239, + 564, + 285 + ], + "type": "text", + "content": "[289] S. Casper, L. Schulze, O. Patel, and D. Hadfield-Menell, \"Defending against unforeseen failure modes with latent adversarial training,\" 2024. [Online]. Available: https://arxiv.org/abs/2403.05030" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 285, + 564, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 285, + 564, + 342 + ], + "spans": [ + { + "bbox": [ + 310, + 285, + 564, + 342 + ], + "type": "text", + "content": "[290] T. Huang, G. Bhattacharya, P. Joshi, J. Kimball, and L. 
Liu, \"Antidote: Post-fine-tuning safety alignment for large language models against harmful finetuning,\" 2024. [Online]. Available: https://arxiv.org/abs/2408.09600" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 342, + 564, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 342, + 564, + 376 + ], + "spans": [ + { + "bbox": [ + 310, + 342, + 564, + 376 + ], + "type": "text", + "content": "[291] J. Li, \"Detecting instruction fine-tuning attack on language models with influence function,\" arXiv preprint arXiv:2504.09026, 2025." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 376, + 564, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 376, + 564, + 423 + ], + "spans": [ + { + "bbox": [ + 310, + 376, + 564, + 423 + ], + "type": "text", + "content": "[292] X. Yi, S. Zheng, L. Wang, X. Wang, and L. He, \"A safety realignment framework via subspace-oriented model fusion for large language models,\" Knowledge-Based Systems, 2024." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 423, + 564, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 423, + 564, + 469 + ], + "spans": [ + { + "bbox": [ + 310, + 423, + 564, + 469 + ], + "type": "text", + "content": "[293] M. Zhu, Y. Weng, L. Yang, Y. Wei, N. Zhang, and Y. Zhang, \"Locking down the finetuned LLMs safety,\" 2025. [Online]. Available: https://openreview.net/forum?id=YGoFl5KKFc" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 469, + 564, + 516 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 469, + 564, + 516 + ], + "spans": [ + { + "bbox": [ + 310, + 469, + 564, + 516 + ], + "type": "text", + "content": "[294] D. Wu, X. Lu, Y. Zhao, and B. Qin, \"Separate the wheat from the chaff: A post-hoc approach to safety re-alignment for fine-tuned language models,\" 2025. [Online]. 
Available: https://arxiv.org/abs/2412.11041" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 516, + 564, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 516, + 564, + 573 + ], + "spans": [ + { + "bbox": [ + 310, + 516, + 564, + 573 + ], + "type": "text", + "content": "[295] Y. Wang, T. Huang, L. Shen, H. Yao, H. Luo, R. Liu, N. Tan, J. Huang, and D. Tao, \"Panacea: Mitigating harmful fine-tuning for large language models via post-fine-tuning perturbation,\" 2025. [Online]. Available: https://arxiv.org/abs/2501.18100" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 573, + 564, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 573, + 564, + 642 + ], + "spans": [ + { + "bbox": [ + 310, + 573, + 564, + 642 + ], + "type": "text", + "content": "[296] Q. Liu, C. Shang, L. Liu, N. Pappas, J. Ma, N. A. John, S. Doss, L. Marquez, M. Ballesteros, and Y. Benajiba, \"Unraveling and mitigating safety alignment degradation of vision-language models,\" 2025. [Online]. Available: https://openreview.net/forum?id=EEWpE9cR27" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 642, + 564, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 642, + 564, + 688 + ], + "spans": [ + { + "bbox": [ + 310, + 642, + 564, + 688 + ], + "type": "text", + "content": "[297] S. Xu, L. Pang, Y. Zhu, H. Shen, and X. Cheng, \"Cross-modal safety mechanism transfer in large vision-language models,\" arXiv preprint arXiv:2410.12662, 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 689, + 564, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 689, + 564, + 723 + ], + "spans": [ + { + "bbox": [ + 310, + 689, + 564, + 723 + ], + "type": "text", + "content": "[298] S. Li, L. Yao, L. Zhang, and Y. Li, \"Safety layers in aligned large language models: The key to llm security,\" arXiv preprint arXiv:2408.17003, 2024." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 723, + 564, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 723, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 310, + 723, + 564, + 747 + ], + "type": "text", + "content": "[299] W. Zhao, Z. Li, Y. Li, Y. Zhang, and J. Sun, \"Defending large language models against jailbreak" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "46" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 45 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "type": "text", + "content": "attacks via layer-specific editing,\" 2024. [Online]. 
Available: https://arxiv.org/abs/2405.18166" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 66, + 301, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 66, + 301, + 123 + ], + "spans": [ + { + "bbox": [ + 47, + 66, + 301, + 123 + ], + "type": "text", + "content": "[300] NIST, \"Artificial intelligence risk management framework: Generative artificial intelligence profile (initial public draft),\" 2024, accessed: 2025-05-29. [Online]. Available: https://nvlpubs.nist.gov/nistpubs/ai/NIST.AI.800-1.ipd.pdf" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 124, + 301, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 124, + 301, + 168 + ], + "spans": [ + { + "bbox": [ + 47, + 124, + 301, + 168 + ], + "type": "text", + "content": "[301] X. Qi, B. Wei, N. Carlini, Y. Huang, T. Xie, L. He, M. Jagielski, M. Nasr, P. Mittal, and P. Henderson, \"On Evaluating the Durability of Safeguards for Open-Weight LLMs,\" Dec. 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 170, + 301, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 170, + 301, + 238 + ], + "spans": [ + { + "bbox": [ + 47, + 170, + 301, + 238 + ], + "type": "text", + "content": "[302] D. Rosati, J. Wehner, K. Williams, L. Bartoszcze, R. Gonzales, C. Maple, S. Majumdar, H. Sajjad, and F. Rudzicz, \"Representation Noising: A Defence Mechanism Against Harmful Finetuning,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, Nov. 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 239, + 301, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 239, + 301, + 296 + ], + "spans": [ + { + "bbox": [ + 47, + 239, + 301, + 296 + ], + "type": "text", + "content": "[303] R. Tamirisa, B. Bharathi, L. Phan, A. Zhou, A. Gatti, T. Suresh, M. Lin, J. Wang, R. Wang, R. Arel, A. Zou, D. Song, B. Li, D. Hendrycks, and M. 
Mazeika, \"Tamper-Resistant Safeguards for Open-Weight LLMs,\" Feb. 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 297, + 301, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 297, + 301, + 354 + ], + "spans": [ + { + "bbox": [ + 47, + 297, + 301, + 354 + ], + "type": "text", + "content": "[304] D. Rosati, J. Wehner, K. Williams, L. Bartoszcze, H. Sajjad, and F. Rudzicz, \"Immunization against harmful fine-tuning attacks,\" in Findings of the Association for Computational Linguistics: EMNLP 2024. Association for Computational Linguistics, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 354, + 301, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 354, + 301, + 411 + ], + "spans": [ + { + "bbox": [ + 47, + 354, + 301, + 411 + ], + "type": "text", + "content": "[305] M. Mazeika, L. Phan, X. Yin, A. Zou, Z. Wang, N. Mu, E. Sakhaee, N. Li, S. Basart, B. Li et al., \"Harmbench: A standardized evaluation framework for automated red teaming and robust refusal,\" arXiv preprint arXiv:2402.04249, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 412, + 301, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 412, + 301, + 480 + ], + "spans": [ + { + "bbox": [ + 47, + 412, + 301, + 480 + ], + "type": "text", + "content": "[306] P. Chao, E. Debenedetti, A. Robey, M. Andriushchenko, F. Croce, V. Sehwag, E. Dobriban, N. Flammarion, G. J. Pappas, F. Tramer et al., \"Jailbreakbench: An open robustness benchmark for jailbreaking large language models,\" arXiv preprint arXiv:2404.01318, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 481, + 301, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 481, + 301, + 525 + ], + "spans": [ + { + "bbox": [ + 47, + 481, + 301, + 525 + ], + "type": "text", + "content": "[307] S. Liu, S. Cui, H. Bu, Y. Shang, and X. 
Zhang, \"Jail-bench: A comprehensive chinese security assessment benchmark for large language models,\" arXiv preprint arXiv:2502.18935, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 527, + 301, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 527, + 301, + 561 + ], + "spans": [ + { + "bbox": [ + 47, + 527, + 301, + 561 + ], + "type": "text", + "content": "[308] J. Cui, W.-L. Chiang, I. Stoica, and C.-J. Hsieh, \"Or-bench: An over-refusal benchmark for large language models,\" arXiv preprint arXiv:2405.20947, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 562, + 301, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 562, + 301, + 619 + ], + "spans": [ + { + "bbox": [ + 47, + 562, + 301, + 619 + ], + "type": "text", + "content": "[309] T. Xie, X. Qi, Y. Zeng, Y. Huang, U. M. Sehwag, K. Huang, L. He, B. Wei, D. Li, Y. Sheng et al., \"Sorry-bench: Systematically evaluating large language model safety refusal behaviors,\" arXiv preprint arXiv:2406.14598, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 620, + 301, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 620, + 301, + 677 + ], + "spans": [ + { + "bbox": [ + 47, + 620, + 301, + 677 + ], + "type": "text", + "content": "[310] L. Zheng, W.-L. Chiang, Y. Sheng, S. Zhuang, Z. Wu, Y. Zhuang, Z. Lin, Z. Li, D. Li, E. Xing et al., \"Judging llm-as-a-judge with mt-bench and chatbot arena,\" Advances in Neural Information Processing Systems, vol. 36, pp. 46595-46623, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 677, + 301, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 301, + 723 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 301, + 723 + ], + "type": "text", + "content": "[311] Z. Wang, S. Hu, S. Zhao, X. Lin, F. Juefei-Xu, Z. Li, L. Han, H. Subramanyam, L. Chen, J. 
Chen et al., \"Mllm-as-a-judge for image safety without human labeling,\" arXiv preprint arXiv:2501.00192, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 724, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 724, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 47, + 724, + 301, + 747 + ], + "type": "text", + "content": "[312] D. Rosati, J. Wehner, K. Williams, L. Bartoszcze, D. Atanasov, R. Gonzales, S. Majumdar, C. Maple," + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 747 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 335, + 42, + 564, + 77 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 564, + 77 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 564, + 77 + ], + "type": "text", + "content": "H. Sajjad, and F. Rudzicz, \"Representation noising effectively prevents harmful fine-tuning on llms,\" arXiv e-prints, pp. arXiv-2405, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 310, + 77, + 564, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 77, + 564, + 157 + ], + "spans": [ + { + "bbox": [ + 310, + 77, + 564, + 157 + ], + "type": "text", + "content": "[313] H. Zhang, J. Huang, K. Mei, Y. Yao, Z. Wang, C. Zhan, H. Wang, and Y. Zhang, \"Agent security bench (ASB): Formalizing and benchmarking attacks and defenses in LLM-based agents,\" in The Thirteenth International Conference on Learning Representations, 2025. [Online]. Available: https://openreview.net/forum?id=V4y0CpX4hK" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 158, + 564, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 158, + 564, + 203 + ], + "spans": [ + { + "bbox": [ + 310, + 158, + 564, + 203 + ], + "type": "text", + "content": "[314] T. Yuan, Z. He, L. Dong, Y. Wang, R. Zhao, T. Xia, L. Xu, B. Zhou, F. 
Li, Z. Zhang et al., \"R-judge: Benchmarking safety risk awareness for llm agents,\" arXiv preprint arXiv:2401.10019, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 205, + 564, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 205, + 564, + 250 + ], + "spans": [ + { + "bbox": [ + 310, + 205, + 564, + 250 + ], + "type": "text", + "content": "[315] Z. Zhang, L. Lei, L. Wu, R. Sun, Y. Huang, C. Long, X. Liu, X. Lei, J. Tang, and M. Huang, \"Safetybench: Evaluating the safety of large language models,\" arXiv preprint arXiv:2309.07045, 2023." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 251, + 564, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 251, + 564, + 296 + ], + "spans": [ + { + "bbox": [ + 310, + 251, + 564, + 296 + ], + "type": "text", + "content": "[316] L. Li, B. Dong, R. Wang, X. Hu, W. Zuo, D. Lin, Y. Qiao, and J. Shao, \"Salad-bench: A hierarchical and comprehensive safety benchmark for large language models,\" arXiv preprint arXiv:2402.05044, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 297, + 564, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 297, + 564, + 342 + ], + "spans": [ + { + "bbox": [ + 310, + 297, + 564, + 342 + ], + "type": "text", + "content": "[317] K. Cobbe, V. Kosaraju, M. Bavarian, M. Chen, H. Jun, L. Kaiser, M. Plappert, J. Tworek, J. Hilton, R. Nakano et al., \"Training verifiers to solve math word problems,\" arXiv preprint arXiv:2110.14168, 2021." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 342, + 564, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 342, + 564, + 388 + ], + "spans": [ + { + "bbox": [ + 310, + 342, + 564, + 388 + ], + "type": "text", + "content": "[318] S.-Y. Miao, C.-C. Liang, and K.-Y. 
Su, \"A diverse corpus for evaluating and developing english math word problem solvers,\" arXiv preprint arXiv:2106.15772, 2021." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 388, + 564, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 388, + 564, + 446 + ], + "spans": [ + { + "bbox": [ + 310, + 388, + 564, + 446 + ], + "type": "text", + "content": "[319] E. Glazer, E. Erdil, T. Besiroglu, D. Chicharro, E. Chen, A. Gunning, C. F. Olsson, J.-S. Denain, A. Ho, E. d. O. Santos et al., \"Frontiermath: A benchmark for evaluating advanced mathematical reasoning in ai,\" arXiv preprint arXiv:2411.04872, 2024." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 447, + 564, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 447, + 564, + 492 + ], + "spans": [ + { + "bbox": [ + 310, + 447, + 564, + 492 + ], + "type": "text", + "content": "[320] M. Chen, J. Tworek, H. Jun, Q. Yuan, H. P. D. O. Pinto, J. Kaplan, H. Edwards, Y. Burda, N. Joseph, G. Brockman et al., \"Evaluating large language models trained on code,\" arXiv preprint arXiv:2107.03374, 2021." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 493, + 564, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 493, + 564, + 538 + ], + "spans": [ + { + "bbox": [ + 310, + 493, + 564, + 538 + ], + "type": "text", + "content": "[321] C. E. Jimenez, J. Yang, A. Wettig, S. Yao, K. Pei, O. Press, and K. Narasimhan, \"Swe-bench: Can language models resolve real-world github issues?\" arXiv preprint arXiv:2310.06770, 2023." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 539, + 564, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 539, + 564, + 573 + ], + "spans": [ + { + "bbox": [ + 310, + 539, + 564, + 573 + ], + "type": "text", + "content": "[322] X. Zhang, J. Zhao, and Y. 
LeCun, \"Character-level convolutional networks for text classification,\" Advances in neural information processing systems, vol. 28, 2015." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 574, + 564, + 618 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 574, + 564, + 618 + ], + "spans": [ + { + "bbox": [ + 310, + 574, + 564, + 618 + ], + "type": "text", + "content": "[323] H. Luo, Y. Jin, X. Liu, T. Shang, R. Chen, and Z. Liu, \"Geic: Universal and multilingual named entity recognition with large language models,\" arXiv preprint arXiv:2409.11022, 2024." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 619, + 564, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 619, + 564, + 665 + ], + "spans": [ + { + "bbox": [ + 310, + 619, + 564, + 665 + ], + "type": "text", + "content": "[324] X. Li, T. Zhang, Y. Dubois, R. Taori, I. Gulrajani, C. Guestrin, P. Liang, and T. B. Hashimoto, \"Alpaca-eval: An automatic evaluator of instruction-following models,\" 2023." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 666, + 564, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 666, + 564, + 723 + ], + "spans": [ + { + "bbox": [ + 310, + 666, + 564, + 723 + ], + "type": "text", + "content": "[325] W.-L. Chiang, L. Zheng, Y. Sheng, A. N. Angelopoulos, T. Li, D. Li, B. Zhu, H. Zhang, M. Jordan, J. E. Gonzalez et al., \"Chatbot arena: An open platform for evaluating llms by human preference,\" in *Forty-first International Conference on Machine Learning*, 2024." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 724, + 564, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 724, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 310, + 724, + 564, + 747 + ], + "type": "text", + "content": "[326] B. Gliwa, I. Mochol, M. Biesek, and A. 
Wawer, \"Samsum corpus: A human-annotated dialogue" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "47" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 46 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 71, + 42, + 301, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 301, + 65 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 301, + 65 + ], + "type": "text", + "content": "dataset for abstractive summarization,\" arXiv preprint arXiv:1911.12237, 2019." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 66, + 301, + 100 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 66, + 301, + 100 + ], + "spans": [ + { + "bbox": [ + 47, + 66, + 301, + 100 + ], + "type": "text", + "content": "[327] M. Macháček and O. Bojar, \"Results of the wmt14 metrics shared task,\" in Proceedings of the Ninth Workshop on Statistical Machine Translation, 2014, pp. 293-301." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 101, + 301, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 101, + 301, + 146 + ], + "spans": [ + { + "bbox": [ + 47, + 101, + 301, + 146 + ], + "type": "text", + "content": "[328] X. Lu, D. Liu, Y. Yu, L. Xu, and J. 
Shao, \"X-boundary: Establishing exact safety boundary to shield llms from multi-turn jailbreaks without compromising usability,\" arXiv preprint arXiv:2502.09990, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 147, + 301, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 147, + 301, + 169 + ], + "spans": [ + { + "bbox": [ + 47, + 147, + 301, + 169 + ], + "type": "text", + "content": "[329] OpenAI, \"Moderation api,\" https://platform.openai.com/docs/guides/moderation/overview, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 170, + 301, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 170, + 301, + 215 + ], + "spans": [ + { + "bbox": [ + 47, + 170, + 301, + 215 + ], + "type": "text", + "content": "[330] H. Inan, K. Upasani, J. Chi, R. Rungta, K. Iyer, Y. Mao, M. Tontchev, Q. Hu, B. Fuller, D. Testuggine, and M. Khabsa, \"Llama guard: Llm-based input-output safeguard for human-ai conversations,\" CoRR, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 216, + 301, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 216, + 301, + 261 + ], + "spans": [ + { + "bbox": [ + 47, + 216, + 301, + 261 + ], + "type": "text", + "content": "[331] J. Ji, T. Qiu, B. Chen, B. Zhang, H. Lou, K. Wang, Y. Duan, Z. He, J. Zhou, Z. Zhang et al., \"Ai alignment: A comprehensive survey,\" arXiv preprint arXiv:2310.19852, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 262, + 301, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 262, + 301, + 308 + ], + "spans": [ + { + "bbox": [ + 47, + 262, + 301, + 308 + ], + "type": "text", + "content": "[332] T. A. Qiu, Y. Zhang, X. Huang, J. Li, J. Ji, and Y. Yang, \"Progressgym: Alignment with a millennium of moral progress,\" Advances in Neural Information Processing Systems, vol. 37, pp. 14570-14607, 2024." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 308, + 301, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 308, + 301, + 354 + ], + "spans": [ + { + "bbox": [ + 47, + 308, + 301, + 354 + ], + "type": "text", + "content": "[333] B. Wang, W. Chen, H. Pei, C. Xie, M. Kang, C. Zhang, C. Xu, Z. Xiong, R. Dutta, R. Schaeffer et al., \"Decoding trust: A comprehensive assessment of trustworthiness in gpt models.\" in NeurIPS, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 354, + 301, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 354, + 301, + 399 + ], + "spans": [ + { + "bbox": [ + 47, + 354, + 301, + 399 + ], + "type": "text", + "content": "[334] S. Gehman, S. Gururangan, M. Sap, Y. Choi, and N. A. Smith, \"Realtoxicityprompts: Evaluating neural toxic degeneration in language models,\" arXiv preprint arXiv:2009.11462, 2020." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 401, + 301, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 401, + 301, + 435 + ], + "spans": [ + { + "bbox": [ + 47, + 401, + 301, + 435 + ], + "type": "text", + "content": "[335] Y. Wang, H. Li, X. Han, P. Nakov, and T. Baldwin, \"Do-not-answer: A dataset for evaluating safeguards in llms,\" arXiv preprint arXiv:2308.13387, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 436, + 301, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 436, + 301, + 492 + ], + "spans": [ + { + "bbox": [ + 47, + 436, + 301, + 492 + ], + "type": "text", + "content": "[336] M. Conover, R. Staats, A. Rane, G. Shani, K. Katz, A. Powell, A. Ross, A. Maas, and A. Zhang, \"Databricks-dolly: Introducing dolly-15k, democratizing the magic of instruction following,\" https://github.com/databrickslabs/dolly, 2023." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 493, + 301, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 493, + 301, + 548 + ], + "spans": [ + { + "bbox": [ + 47, + 493, + 301, + 548 + ], + "type": "text", + "content": "[337] X. Wu, Y. Hao, K. Sun, Y. Chen, F. Zhu, R. Zhao, and H. Li, \"Human preference score v2: A solid benchmark for evaluating human preferences of text-to-image synthesis,\" arXiv preprint arXiv:2306.09341, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 550, + 301, + 607 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 550, + 301, + 607 + ], + "spans": [ + { + "bbox": [ + 47, + 550, + 301, + 607 + ], + "type": "text", + "content": "[338] Y. Yan, S. Wang, J. Huo, H. Li, B. Li, J. Su, X. Gao, Y.-F. Zhang, T. Xu, Z. Chu et al., \"Errorradar: Benchmarking complex mathematical reasoning of multimodal large language models via error detection,\" arXiv preprint arXiv:2410.04509, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 608, + 301, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 608, + 301, + 643 + ], + "spans": [ + { + "bbox": [ + 47, + 608, + 301, + 643 + ], + "type": "text", + "content": "[339] Q. Jin, B. Dhingra, Z. Liu, W. W. Cohen, and X. Lu, \"Pubmedqa: A dataset for biomedical research question answering,\" arXiv preprint arXiv:1909.06146, 2019." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 643, + 301, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 643, + 301, + 689 + ], + "spans": [ + { + "bbox": [ + 47, + 643, + 301, + 689 + ], + "type": "text", + "content": "[340] K. M. Hermann, T. Kocisky, E. Grefenstette, L. Espeholt, W. Kay, M. Suleyman, and P. Blunsom, \"Teaching machines to read and comprehend,\" Advances in neural information processing systems, vol. 28, 2015." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 689, + 301, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 301, + 723 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 301, + 723 + ], + "type": "text", + "content": "[341] S. Lin, J. Hilton, and O. Evans, \"Truthfulqa: Measuring how models mimic human falsehoods,\" arXiv preprint arXiv:2109.07958, 2021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 723, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 723, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 47, + 723, + 301, + 747 + ], + "type": "text", + "content": "[342] Y. Mou, S. Zhang, and W. Ye, \"Sg-bench: Evaluating llm safety generalization across diverse tasks and" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "type": "text", + "content": "prompt types,\" Advances in Neural Information Processing Systems, vol. 37, pp. 123032-123054, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 66, + 564, + 111 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 66, + 564, + 111 + ], + "spans": [ + { + "bbox": [ + 310, + 66, + 564, + 111 + ], + "type": "text", + "content": "[343] F. Jiang, Z. Xu, Y. Li, L. Niu, Z. Xiang, B. Li, B. Y. Lin, and R. Poovendran, \"Safechain: Safety of language models with long chain-of-thought reasoning capabilities,\" arXiv preprint arXiv:2502.12025, 2025." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 112, + 564, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 112, + 564, + 168 + ], + "spans": [ + { + "bbox": [ + 310, + 112, + 564, + 168 + ], + "type": "text", + "content": "[344] T. Hartvigsen, S. Gabriel, H. Palangi, M. Sap, D. Ray, and E. Kamar, \"Toxigen: A large-scale machine-generated dataset for adversarial and implicit hate speech detection,\" arXiv preprint arXiv:2203.09509, 2022." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 170, + 564, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 170, + 564, + 215 + ], + "spans": [ + { + "bbox": [ + 310, + 170, + 564, + 215 + ], + "type": "text", + "content": "[345] A. Souly, Q. Lu, D. Bowen, T. Trinh, E. Hsieh, S. Pandey, P. Abbeel, J. Svegliato, S. Emmons, O. Watkins et al., \"A strongreject for empty jailbreaks,\" arXiv preprint arXiv:2402.10260, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 216, + 564, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 216, + 564, + 283 + ], + "spans": [ + { + "bbox": [ + 310, + 216, + 564, + 283 + ], + "type": "text", + "content": "[346] L. Jiang, K. Rao, S. Han, A. Ettinger, F. Brahman, S. Kumar, N. Mireshghallah, X. Lu, M. Sap, Y. Choi et al., \"Wildteaming at scale: From in-the-wild jailbreaks to (adversarily) safer language models,\" Advances in Neural Information Processing Systems, vol. 37, pp. 47094-47165, 2024." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 285, + 564, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 285, + 564, + 319 + ], + "spans": [ + { + "bbox": [ + 310, + 285, + 564, + 319 + ], + "type": "text", + "content": "[347] D. Hendrycks, M. Mazeika, and T. Woodside, \"An overview of catastrophic ai risks,\" arXiv preprint arXiv:2306.12001, 2023." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 319, + 564, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 319, + 564, + 376 + ], + "spans": [ + { + "bbox": [ + 310, + 319, + 564, + 376 + ], + "type": "text", + "content": "[348] B. Baker, J. Huizinga, L. Gao, Z. Dou, M. Y. Guan, A. Madry, W. Zaremba, J. Pachocki, and D. Farhi, \"Monitoring reasoning models for misbehavior and the risks of promoting obfuscation,\" arXiv preprint arXiv:2503.11926, 2025." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 377, + 564, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 377, + 564, + 434 + ], + "spans": [ + { + "bbox": [ + 310, + 377, + 564, + 434 + ], + "type": "text", + "content": "[349] T. Hagendorff, \"Deception abilities emerged in large language models,\" Proceedings of the National Academy of Sciences, vol. 121, no. 24, p. e2317967121, 2024. [Online]. Available: https://www.pnas.org/doi/abs/10.1073/pnas.2317967121" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 435, + 564, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 435, + 564, + 479 + ], + "spans": [ + { + "bbox": [ + 310, + 435, + 564, + 479 + ], + "type": "text", + "content": "[350] P. S. Park, S. Goldstein, A. O'Gara, M. Chen, and D. Hendrycks, \"Ai deception: A survey of examples, risks, and potential solutions,\" Patterns, vol. 5, no. 5, 2024." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 481, + 564, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 481, + 564, + 504 + ], + "spans": [ + { + "bbox": [ + 310, + 481, + 564, + 504 + ], + "type": "text", + "content": "[351] OpenAI, \"Gpt-4 technical report,\" ArXiv, vol. abs/2303.08774, 2023." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 505, + 564, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 505, + 564, + 550 + ], + "spans": [ + { + "bbox": [ + 310, + 505, + 564, + 550 + ], + "type": "text", + "content": "[352] F. Ward, F. Toni, F. Belardinelli, and T. Everitt, \"Honesty is the best policy: defining and mitigating ai deception,\" Advances in neural information processing systems, vol. 36, pp. 2313-2341, 2023." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 550, + 564, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 550, + 564, + 596 + ], + "spans": [ + { + "bbox": [ + 310, + 550, + 564, + 596 + ], + "type": "text", + "content": "[353] J. Scheurer, M. Balesni, and M. Hobbahn, \"Large language models can strategically deceive their users when put under pressure,\" arXiv preprint arXiv:2311.07590, 2023." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 597, + 564, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 597, + 564, + 642 + ], + "spans": [ + { + "bbox": [ + 310, + 597, + 564, + 642 + ], + "type": "text", + "content": "[354] S. Chern, Z. Hu, Y. Yang, E. Chern, Y. Guo, J. Jin, B. Wang, and P. Liu, \"Behonest: Benchmarking honesty in large language models,\" arXiv preprint arXiv:2406.13261, 2024." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 643, + 564, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 643, + 564, + 677 + ], + "spans": [ + { + "bbox": [ + 310, + 643, + 564, + 677 + ], + "type": "text", + "content": "[355] A. O'Gara, \"Hoodwinked: Deception and cooperation in a text-based game for language models,\" arXiv preprint arXiv:2308.01404, 2023." 
+ } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 677, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 677, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 677, + 564, + 746 + ], + "type": "text", + "content": "[356] M. F. A. R. D. T. (FAIR)†, A. Bakhtin, N. Brown, E. Dinan, G. Farina, C. Flaherty, D. Fried, A. Goff, J. Gray, H. Hu et al., \"Human-level play in the game of diplomacy by combining language models with strategic reasoning,\" Science, vol. 378, no. 6624, pp. 1067-1074, 2022." + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "48" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 47 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 88 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 42, + 301, + 88 + ], + "spans": [ + { + "bbox": [ + 47, + 42, + 301, + 88 + ], + "type": "text", + "content": "[357] L. Schulz, N. Alon, J. Rosenschein, and P. Dayan, \"Emergent deception and skepticism via theory of mind,\" in First Workshop on Theory of Mind in Communicating Agents, 2023." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 89, + 301, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 89, + 301, + 133 + ], + "spans": [ + { + "bbox": [ + 47, + 89, + 301, + 133 + ], + "type": "text", + "content": "[358] A. Meinke, B. Schoen, J. Scheurer, M. Balesni, R. Shah, and M. Hobbahn, \"Frontier models are capable of in-context scheming,\" arXiv preprint arXiv:2412.04984, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 134, + 301, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 134, + 301, + 180 + ], + "spans": [ + { + "bbox": [ + 47, + 134, + 301, + 180 + ], + "type": "text", + "content": "[359] R. Greenblatt, C. Denison, B. Wright, F. Roger, M. Mac-Diarmid, S. Marks, J. Treutlein, T. Belonax, J. Chen, D. Duvenaud et al., \"Alignment faking in large language models,\" arXiv preprint arXiv:2412.14093, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 181, + 301, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 181, + 301, + 250 + ], + "spans": [ + { + "bbox": [ + 47, + 181, + 301, + 250 + ], + "type": "text", + "content": "[360] A. Pan, J. S. Chan, A. Zou, N. Li, S. Basart, T. Woodside, H. Zhang, S. Emmons, and D. Hendrycks, \"Do the rewards justify the means? measuring trade-offs between rewards and ethical behavior in the machiavelli benchmark,\" in International conference on machine learning. PMLR, 2023, pp. 26837-26867." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 251, + 301, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 251, + 301, + 296 + ], + "spans": [ + { + "bbox": [ + 47, + 251, + 301, + 296 + ], + "type": "text", + "content": "[361] L. Vaugrante, F. Carlon, M. Menke, and T. Hagen-dorff, \"Compromising honesty and harmlessness in language models via deception attacks,\" arXiv preprint arXiv:2502.08301, 2025." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 297, + 301, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 297, + 301, + 330 + ], + "spans": [ + { + "bbox": [ + 47, + 297, + 301, + 330 + ], + "type": "text", + "content": "[362] J. Ji, K. Wang, T. Qiu, B. Chen, J. Zhou, C. Li, H. Lou, and Y. Yang, \"Language models resist alignment,\" arXiv preprint arXiv:2406.06144, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 331, + 301, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 331, + 301, + 376 + ], + "spans": [ + { + "bbox": [ + 47, + 331, + 301, + 376 + ], + "type": "text", + "content": "[363] L. Bürger, F. A. Hamprecht, and B. Nadler, \"Truth is universal: Robust detection of lies in llms,\" Advances in Neural Information Processing Systems, vol. 37, pp. 138-393-138-431, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 377, + 301, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 377, + 301, + 422 + ], + "spans": [ + { + "bbox": [ + 47, + 377, + 301, + 422 + ], + "type": "text", + "content": "[364] OpenAI, \"Detecting misbehavior in frontier reasoning models,\" https://openai.com/index/chain-of-thought-monitoring/, Mar. 2025, accessed: 2025-05-14." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 423, + 301, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 423, + 301, + 458 + ], + "spans": [ + { + "bbox": [ + 47, + 423, + 301, + 458 + ], + "type": "text", + "content": "[365] T. Everitt, V. Krakovna, L. Orseau, M. Hutter, and S. Legg, \"Reinforcement learning with a corrupted reward channel,\" arXiv preprint arXiv:1705.08417, 2017." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 458, + 301, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 458, + 301, + 492 + ], + "spans": [ + { + "bbox": [ + 47, + 458, + 301, + 492 + ], + "type": "text", + "content": "[366] S. Zhuang and D. Hadfield-Menell, \"Consequences of misaligned ai,\" Advances in Neural Information Processing Systems, vol. 33, pp. 15763-15773, 2020." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 492, + 301, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 492, + 301, + 561 + ], + "spans": [ + { + "bbox": [ + 47, + 492, + 301, + 561 + ], + "type": "text", + "content": "[367] V. Krakovna, J. Uesato, V. Mikulik, M. Rahtz, T. Everitt, R. Kumar, Z. Kenton, J. Leike, and S. Legg, \"Specification gaming: the flip side of ai ingenuity,\" 2020, accessed: 2025-03-30. [Online]. Available: https://deepmind.google/discover/blog/ specification-gaming-the-flip-side-of-ai-ingenuity/" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 562, + 301, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 562, + 301, + 596 + ], + "spans": [ + { + "bbox": [ + 47, + 562, + 301, + 596 + ], + "type": "text", + "content": "[368] D. Amodei, C. Olah, J. Steinhardt, P. Christiano, J. Schulman, and D. Mané, \"Concrete problems in air safety,\" arXiv preprint arXiv:1606.06565, 2016." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 597, + 301, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 597, + 301, + 642 + ], + "spans": [ + { + "bbox": [ + 47, + 597, + 301, + 642 + ], + "type": "text", + "content": "[369] L. Weng, \"Reward hacking in reinforcement learning,\" 2024, accessed: 2025-03-30. [Online]. 
Available: https://lilianweng.github.io/posts/2024-11-28-reward-hacking" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 643, + 301, + 699 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 643, + 301, + 699 + ], + "spans": [ + { + "bbox": [ + 47, + 643, + 301, + 699 + ], + "type": "text", + "content": "[370] T. Everitt, M. Hutter, R. Kumar, and V. Krakovna, \"Reward tampering problems and solutions in reinforcement learning: A causal influence diagram perspective,\" Synthese, vol. 198, no. Suppl 27, pp. 6435-6467, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 700, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 700, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 47, + 700, + 301, + 747 + ], + "type": "text", + "content": "[371] J. Skalse, N. Howe, D. Krasheninnikov, and D. Krueger, \"Defining and characterizing reward gaming,\" Advances in Neural Information Processing Systems, vol. 35, pp. 9460-9471, 2022." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 747 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 310, + 42, + 564, + 100 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 42, + 564, + 100 + ], + "spans": [ + { + "bbox": [ + 310, + 42, + 564, + 100 + ], + "type": "text", + "content": "[372] S. Casper, X. Davies, C. Shi, T. K. Gilbert, J. Scheurer, J. Rando, R. Freedman, T. Korbak, D. Lindner, P. Freire et al., \"Open problems and fundamental limitations of reinforcement learning from human feedback,\" arXiv preprint arXiv:2307.15217, 2023." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 101, + 564, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 101, + 564, + 144 + ], + "spans": [ + { + "bbox": [ + 310, + 101, + 564, + 144 + ], + "type": "text", + "content": "[373] L. Gao, J. 
Schulman, and J. Hilton, \"Scaling laws for reward model overoptimization,\" in International Conference on Machine Learning. PMLR, 2023, pp. 10835-10866." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 146, + 564, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 146, + 564, + 214 + ], + "spans": [ + { + "bbox": [ + 310, + 146, + 564, + 214 + ], + "type": "text", + "content": "[374] E. Perez, S. Ringer, K. Lukosiute, K. Nguyen, E. Chen, S. Heiner, C. Pettit, C. Olsson, S. Kundu, S. Kadavath et al., \"Discovering language model behaviors with model-written evaluations,\" in Findings of the Association for Computational Linguistics: ACL 2023, 2023, pp. 13387-13434." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 215, + 564, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 215, + 564, + 274 + ], + "spans": [ + { + "bbox": [ + 310, + 215, + 564, + 274 + ], + "type": "text", + "content": "[375] C. Denison, M. MacDiarmid, F. Berez, D. Duvenaud, S. Kravec, S. Marks, N. Schiefer, R. Soklaski, A. Tamkin, J. Kaplan et al., \"Sycophancy to subterfuge: Investigating reward-tampering in large language models,\" arXiv preprint arXiv:2406.10162, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 274, + 564, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 274, + 564, + 308 + ], + "spans": [ + { + "bbox": [ + 310, + 274, + 564, + 308 + ], + "type": "text", + "content": "[376] P. Singhal, T. Goyal, J. Xu, and G. Durrett, \"A long way to go: Investigating length correlations in rlhf,\" arXiv preprint arXiv:2310.03716, 2023." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 308, + 564, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 308, + 564, + 365 + ], + "spans": [ + { + "bbox": [ + 310, + 308, + 564, + 365 + ], + "type": "text", + "content": "[377] F. Bianchi, M. Suzgun, G. 
Attanasio, P. Röttger, D. Jurafsky, T. Hashimoto, and J. Zou, \"Safety-tuned llamas: Lessons from improving the safety of large language models that follow instructions,\" arXiv preprint arXiv:2309.07875, 2023." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 365, + 564, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 365, + 564, + 399 + ], + "spans": [ + { + "bbox": [ + 310, + 365, + 564, + 399 + ], + "type": "text", + "content": "[378] M. Tegmark and S. Omohundro, \"Provably safe systems: the only path to controllable agi,\" arXiv preprint arXiv:2309.01933, 2023." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 399, + 564, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 399, + 564, + 458 + ], + "spans": [ + { + "bbox": [ + 310, + 399, + 564, + 458 + ], + "type": "text", + "content": "[379] D. Dalrymple, J. Skalse, Y. Bengio, S. Russell, M. Tegmark, S. Seshia, S. Omohundro, C. Szegedy, B. Goldhaber, N. Ammann et al., \"Towards guaranteed safe ai: A framework for ensuring robust and reliable ai systems,\" arXiv preprint arXiv:2405.06624, 2024." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 458, + 564, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 458, + 564, + 504 + ], + "spans": [ + { + "bbox": [ + 310, + 458, + 564, + 504 + ], + "type": "text", + "content": "[380] A. Caliskan, J. J. Bryson, and A. Narayanan, \"Semantics derived automatically from language corpora contain human-like biases,\" Science, vol. 356, no. 6334, pp. 183-186, 2017." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 504, + 564, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 504, + 564, + 561 + ], + "spans": [ + { + "bbox": [ + 310, + 504, + 564, + 561 + ], + "type": "text", + "content": "[381] R. Xu, Z. Zhou, T. Zhang, Z. Qi, S. Yao, K. Xu, W. Xu, and H. 
Qiu, \"Walking in others' shoes: How perspective-taking guides large language models in reducing toxicity and bias,\" arXiv preprint arXiv:2407.15366, 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 562, + 564, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 562, + 564, + 608 + ], + "spans": [ + { + "bbox": [ + 310, + 562, + 564, + 608 + ], + "type": "text", + "content": "[382] D. Acemoglu and P. Restrepo, \"Artificial intelligence, automation, and work,\" in The economics of artificial intelligence: An agenda. University of Chicago Press, 2018, pp. 197-236." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 608, + 564, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 608, + 564, + 653 + ], + "spans": [ + { + "bbox": [ + 310, + 608, + 564, + 653 + ], + "type": "text", + "content": "[383] J. Mokander, J. Schuett, H. R. Kirk, and L. Floridi, \"Auditing large language models: a three-layered approach,\" AI and Ethics, vol. 4, no. 4, pp. 1085-1115, 2024." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 654, + 564, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 654, + 564, + 712 + ], + "spans": [ + { + "bbox": [ + 310, + 654, + 564, + 712 + ], + "type": "text", + "content": "[384] M. Anderljung, J. Barnhart, A. Korinek, J. Leung, C. O'Keefe, J. Whittlestone, S. Avin, M. Brundage, J. Bullock, D. Cass-Beggs et al., \"Frontier ai regulation: Managing emerging risks to public safety,\" arXiv preprint arXiv:2307.03718, 2023." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 712, + 564, + 735 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 712, + 564, + 735 + ], + "spans": [ + { + "bbox": [ + 310, + 712, + 564, + 735 + ], + "type": "text", + "content": "[385] A. Mannes, \"Governance, risk, and artificial intelligence,\" *Ai Magazine*, vol. 41, no. 1, pp. 61-69, 2020." 
+ } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 735, + 564, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 735, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 310, + 735, + 564, + 747 + ], + "type": "text", + "content": "[386] L. Koessler and J. Schuett, \"Risk assessment at agi" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "49" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 48 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 71, + 42, + 301, + 77 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 301, + 77 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 301, + 77 + ], + "type": "text", + "content": "companies: A review of popular risk assessment techniques from other safety-critical industries,\" arXiv preprint arXiv:2307.08823, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 77, + 301, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 77, + 301, + 123 + ], + "spans": [ + { + "bbox": [ + 47, + 77, + 301, + 123 + ], + "type": "text", + "content": "[387] J. Schuett, N. Dreksler, M. Anderljung, D. McCaffary, L. Heim, E. Bluemke, and B. 
Garfinkel, \"Towards best practices in agi safety and governance: A survey of expert opinion,\" arXiv preprint arXiv:2305.07153, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 124, + 301, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 124, + 301, + 168 + ], + "spans": [ + { + "bbox": [ + 47, + 124, + 301, + 168 + ], + "type": "text", + "content": "[388] L. Ho, J. Barnhart, R. Trager, Y. Bengio, M. Brundage, A. Carnegie, R. Chowdhury, A. Dafoe, G. Hadfield, M. Levi et al., \"International institutions for advanced ai,\" arXiv preprint arXiv:2307.04699, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 170, + 301, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 170, + 301, + 203 + ], + "spans": [ + { + "bbox": [ + 47, + 170, + 301, + 203 + ], + "type": "text", + "content": "[389] M. M. Maas, \"Aligning ai regulation to sociotechnical change,\" in The Oxford Handbook of AI Governance, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 205, + 301, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 205, + 301, + 261 + ], + "spans": [ + { + "bbox": [ + 47, + 205, + 301, + 261 + ], + "type": "text", + "content": "[390] M. Kinniment, L. J. K. Sato, H. Du, B. Goodrich, M. Hasin, L. Chan, L. H. Miles, T. R. Lin, H. Wijk, J. Burget et al., \"Evaluating language-model agents on realistic autonomous tasks,\" arXiv preprint arXiv:2312.11671, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 262, + 301, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 262, + 301, + 319 + ], + "spans": [ + { + "bbox": [ + 47, + 262, + 301, + 319 + ], + "type": "text", + "content": "[391] J. Tallberg, E. Erman, M. Furendal, J. Geith, M. Klamberg, and M. 
Lundgren, \"The global governance of artificial intelligence: Next steps for empirical and normative research,\" International Studies Review, vol. 25, no. 3, p. viad040, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 320, + 301, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 320, + 301, + 342 + ], + "spans": [ + { + "bbox": [ + 47, + 320, + 301, + 342 + ], + "type": "text", + "content": "[392] OECD, \"OECD Principles on Artificial Intelligence,\" https://oecd.ai/en/ai-principles, 2019." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 343, + 301, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 343, + 301, + 376 + ], + "spans": [ + { + "bbox": [ + 47, + 343, + 301, + 376 + ], + "type": "text", + "content": "[393] UNESCO, \"Recommendation on the Ethics of Artificial Intelligence,\" https://unesdoc.unesco.org/ark:/48223/pf0000381137, 2021." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 377, + 301, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 377, + 301, + 456 + ], + "spans": [ + { + "bbox": [ + 47, + 377, + 301, + 456 + ], + "type": "text", + "content": "[394] E. Seger, N. Dreksler, R. Moulange, E. Dardaman, J. Schuett, K. Wei, C. Winter, M. Arnold, S. O. hEigeartaigh, A. Korinek et al., \"Open-sourcing highly capable foundation models: An evaluation of risks, benefits, and alternative methods for pursuing open-source objectives,\" arXiv preprint arXiv:2311.09227, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 458, + 301, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 458, + 301, + 503 + ], + "spans": [ + { + "bbox": [ + 47, + 458, + 301, + 503 + ], + "type": "text", + "content": "[395] F. Urbina, F. Lentzos, C. Invernizzi, and S. Ekins, \"Dual use of artificial-intelligence-powered drug discovery,\" Nature machine intelligence, vol. 4, no. 3, pp. 
189-191, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 505, + 301, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 505, + 301, + 538 + ], + "spans": [ + { + "bbox": [ + 47, + 505, + 301, + 538 + ], + "type": "text", + "content": "[396] Meta, \"Meta and Microsoft introduce the next generation of Llama,\" https://ai.meta.com/blog/llama-2, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 539, + 301, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 539, + 301, + 594 + ], + "spans": [ + { + "bbox": [ + 47, + 539, + 301, + 594 + ], + "type": "text", + "content": "[397] E. Mostaque, \"Democratizing ai, stable diffusion & generative models,\" https://exchange scale.com/public/videos/emad-mostaque-stability-ai-stable-diffusion-open-sou2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 597, + 301, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 597, + 301, + 653 + ], + "spans": [ + { + "bbox": [ + 47, + 597, + 301, + 653 + ], + "type": "text", + "content": "[398] J. A. Goldstein, G. Sastry, M. Musser, R. DiResta, M. Gentzel, and K. Sedova, \"Generative language models and automated influence operations: Emerging threats and potential mitigations,\" arXiv preprint arXiv:2301.04246, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 654, + 301, + 711 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 654, + 301, + 711 + ], + "spans": [ + { + "bbox": [ + 47, + 654, + 301, + 711 + ], + "type": "text", + "content": "[399] I. Solaiman, M. Brundage, J. Clark, A. Askell, A. Herbert-Voss, J. Wu, A. Radford, G. Krueger, J. W. Kim, S. Kreps et al., \"Release strategies and the social impacts of language models,\" arXiv preprint arXiv:1908.09203, 2019." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "type": "text", + "content": "[400] P. Chavez, \"An ai challenge: Balancing open and closed systems,\" https://cepa.org/article/an-ai-challenge-balancing-open-and-closed-systems," + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 311, + 43, + 564, + 747 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 335, + 43, + 358, + 53 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 43, + 358, + 53 + ], + "spans": [ + { + "bbox": [ + 335, + 43, + 358, + 53 + ], + "type": "text", + "content": "2023." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 311, + 53, + 564, + 100 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 53, + 564, + 100 + ], + "spans": [ + { + "bbox": [ + 311, + 53, + 564, + 100 + ], + "type": "text", + "content": "[401] N. Zhang, Y. Yao, B. Tian, P. Wang, S. Deng, M. Wang, Z. Xi, S. Mao, J. Zhang, Y. Ni et al., \"A comprehensive study of knowledge editing for large language models,\" arXiv preprint arXiv:2401.01286, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 311, + 101, + 564, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 101, + 564, + 145 + ], + "spans": [ + { + "bbox": [ + 311, + 101, + 564, + 145 + ], + "type": "text", + "content": "[402] J. Fang, H. Jiang, K. Wang, Y. Ma, X. Wang, X. He, and T.-s. Chua, \"Alphaedit: Null-space constrained knowledge editing for language models,\" arXiv preprint arXiv:2410.02355, 2024." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 311, + 147, + 564, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 147, + 564, + 203 + ], + "spans": [ + { + "bbox": [ + 311, + 147, + 564, + 203 + ], + "type": "text", + "content": "[403] Z. Zhang, Y. Zhou, X. Zhao, T. Che, and L. Lyu, \"Prompt certified machine unlearning with randomized gradient smoothing and quantization,\" Advances in Neural Information Processing Systems, vol. 35, pp. 13433-13455, 2022." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 311, + 205, + 564, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 205, + 564, + 261 + ], + "spans": [ + { + "bbox": [ + 311, + 205, + 564, + 261 + ], + "type": "text", + "content": "[404] T. Che, Y. Zhou, Z. Zhang, L. Lyu, J. Liu, D. Yan, D. Dou, and J. Huan, \"Fast federated machine unlearning with nonlinear functional theory,\" in International conference on machine learning. PMLR, 2023, pp. 4241-4268." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 311, + 262, + 564, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 262, + 564, + 296 + ], + "spans": [ + { + "bbox": [ + 311, + 262, + 564, + 296 + ], + "type": "text", + "content": "[405] W. Wang, Z. Tian, C. Zhang, and S. Yu, \"Machine unlearning: A comprehensive survey,\" arXiv preprint arXiv:2405.07406, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 311, + 297, + 564, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 297, + 564, + 342 + ], + "spans": [ + { + "bbox": [ + 311, + 297, + 564, + 342 + ], + "type": "text", + "content": "[406] S. Liu, Y. Yao, J. Jia, S. Casper, N. Baracaldo, P. Hase, Y. Yao, C. Y. Liu, X. Xu, H. Li et al., \"Rethinking machine unlearning for large language models,\" Nature Machine Intelligence, pp. 1-14, 2025." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 311, + 344, + 564, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 344, + 564, + 376 + ], + "spans": [ + { + "bbox": [ + 311, + 344, + 564, + 376 + ], + "type": "text", + "content": "[407] Y. Yao, X. Xu, and Y. Liu, \"Large language model unlearning,\" Advances in Neural Information Processing Systems, vol. 37, pp. 105-425-105-475, 2025." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 311, + 377, + 564, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 377, + 564, + 422 + ], + "spans": [ + { + "bbox": [ + 311, + 377, + 564, + 422 + ], + "type": "text", + "content": "[408] C. Ding, J. Wu, Y. Yuan, J. Lu, K. Zhang, A. Su, X. Wang, and X. He, \"Unified parameter-efficient unlearning for llms,\" arXiv preprint arXiv:2412.00383, 2024." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 311, + 423, + 564, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 423, + 564, + 468 + ], + "spans": [ + { + "bbox": [ + 311, + 423, + 564, + 468 + ], + "type": "text", + "content": "[409] Z. Li, H. Jiang, H. Chen, B. Bi, Z. Zhou, F. Sun, J. Fang, and X. Wang, \"Reinforced lifelong editing for language models,\" arXiv preprint arXiv:2502.05759, 2025." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 311, + 470, + 564, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 470, + 564, + 503 + ], + "spans": [ + { + "bbox": [ + 311, + 470, + 564, + 503 + ], + "type": "text", + "content": "[410] E. Mitchell, C. Lin, A. Bosselut, C. Finn, and C. D. Manning, \"Fast model editing at scale,\" arXiv preprint arXiv:2110.11309, 2021." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 311, + 505, + 564, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 505, + 564, + 538 + ], + "spans": [ + { + "bbox": [ + 311, + 505, + 564, + 538 + ], + "type": "text", + "content": "[411] N. De Cao, W. Aziz, and I. Titov, \"Editing factual knowledge in language models,\" arXiv preprint arXiv:2104.08164, 2021." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 311, + 539, + 564, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 539, + 564, + 585 + ], + "spans": [ + { + "bbox": [ + 311, + 539, + 564, + 585 + ], + "type": "text", + "content": "[412] P. Wang, Z. Li, N. Zhang, Z. Xu, Y. Yao, Y. Jiang, P. Xie, F. Huang, and H. Chen, \"Wise: Rethinking the knowledge memory for lifelong model editing of large language models,\" arXiv preprint arXiv:2405.14768, 2024." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 311, + 586, + 564, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 586, + 564, + 641 + ], + "spans": [ + { + "bbox": [ + 311, + 586, + 564, + 641 + ], + "type": "text", + "content": "[413] T. Hartvigsen, S. Sankaranarayanan, H. Palangi, Y. Kim, and M. Ghassemi, \"Aging with grace: Lifelong model editing with discrete key-value adaptors,\" Advances in Neural Information Processing Systems, vol. 36, 2024." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 311, + 643, + 564, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 643, + 564, + 688 + ], + "spans": [ + { + "bbox": [ + 311, + 643, + 564, + 688 + ], + "type": "text", + "content": "[414] H. Jiang, J. Fang, N. Zhang, G. Ma, M. Wan, X. Wang, X. He, and T.-s. Chua, \"Anyedit: Edit any knowledge encoded in language models,\" arXiv preprint arXiv:2502.05628, 2025." 
+ } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 311, + 689, + 564, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 689, + 564, + 734 + ], + "spans": [ + { + "bbox": [ + 311, + 689, + 564, + 734 + ], + "type": "text", + "content": "[415] H. Jiang, J. Fang, T. Zhang, A. Zhang, R. Wang, T. Liang, and X. Wang, \"Neuron-level sequential editing for large language models,\" arXiv preprint arXiv:2410.04045, 2024." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 311, + 735, + 564, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 735, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 311, + 735, + 564, + 747 + ], + "type": "text", + "content": "[416] K. Meng, D. Bau, A. Andonian, and Y. Belinkov," + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "50" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 49 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 71, + 42, + 300, + 77 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 300, + 77 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 300, + 77 + ], + "type": "text", + "content": "\"Locating and editing factual associations in gpt,\" Advances in Neural Information Processing Systems, vol. 35, pp. 17359-17372, 2022." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 77, + 301, + 122 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 77, + 301, + 122 + ], + "spans": [ + { + "bbox": [ + 47, + 77, + 301, + 122 + ], + "type": "text", + "content": "[417] A. Prasad, P. Hase, X. Zhou, and M. Bansal, \"Grips: Gradient-free, edit-based instruction search for prompting large language models,\" arXiv preprint arXiv:2203.07281, 2022." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 124, + 301, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 124, + 301, + 156 + ], + "spans": [ + { + "bbox": [ + 47, + 124, + 301, + 156 + ], + "type": "text", + "content": "[418] G. Gangadhar and K. Stratos, \"Model editing by standard fine-tuning,\" arXiv preprint arXiv:2402.11078, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 158, + 301, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 158, + 301, + 203 + ], + "spans": [ + { + "bbox": [ + 47, + 158, + 301, + 203 + ], + "type": "text", + "content": "[419] E. 
Mitchell, C. Lin, A. Bosselut, C. D. Manning, and C. Finn, \"Memory-based model editing at scale,\" in International Conference on Machine Learning. PMLR, 2022, pp. 15817-15831." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 205, + 301, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 205, + 301, + 250 + ], + "spans": [ + { + "bbox": [ + 47, + 205, + 301, + 250 + ], + "type": "text", + "content": "[420] Y. Yao, P. Wang, B. Tian, S. Cheng, Z. Li, S. Deng, H. Chen, and N. Zhang, \"Editing large language models: Problems, methods, and opportunities,\" arXiv preprint arXiv:2305.13172, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 251, + 301, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 251, + 301, + 285 + ], + "spans": [ + { + "bbox": [ + 47, + 251, + 301, + 285 + ], + "type": "text", + "content": "[421] K. Meng, A. S. Sharma, A. Andonian, Y. Belinkov, and D. Bau, \"Mass-editing memory in a transformer,\" arXiv preprint arXiv:2210.07229, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 285, + 301, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 285, + 301, + 330 + ], + "spans": [ + { + "bbox": [ + 47, + 285, + 301, + 330 + ], + "type": "text", + "content": "[422] J.-C. Gu, H.-X. Xu, J.-Y. Ma, P. Lu, Z.-H. Ling, K.-W. Chang, and N. Peng, \"Model editing can hurt general abilities of large language models,\" arXiv e-prints, pp. arXiv-2401, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 331, + 301, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 331, + 301, + 376 + ], + "spans": [ + { + "bbox": [ + 47, + 331, + 301, + 376 + ], + "type": "text", + "content": "[423] X. Li, S. Li, S. Song, J. Yang, J. Ma, and J. Yu, \"Pmet: Precise model editing in a transformer,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 17, 2024, pp. 
18564-18572." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 377, + 301, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 377, + 301, + 411 + ], + "spans": [ + { + "bbox": [ + 47, + 377, + 301, + 411 + ], + "type": "text", + "content": "[424] M. Zhang, X. Ye, Q. Liu, P. Ren, S. Wu, and Z. Chen, \"Knowledge graph enhanced large language model editing,\" arXiv preprint arXiv:2402.13593, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 412, + 301, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 412, + 301, + 446 + ], + "spans": [ + { + "bbox": [ + 47, + 412, + 301, + 446 + ], + "type": "text", + "content": "[425] C. Chen, B. Huang, Z. Li, Z. Chen, S. Lai, X. Xu, J.-C. Gu, J. Gu, H. Yao, C. Xiao et al., \"Can editing llms inject harm?\" arXiv preprint arXiv:2407.20224, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 447, + 301, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 447, + 301, + 491 + ], + "spans": [ + { + "bbox": [ + 47, + 447, + 301, + 491 + ], + "type": "text", + "content": "[426] M. Wang, N. Zhang, Z. Xu, Z. Xi, S. Deng, Y. Yao, Q. Zhang, L. Yang, J. Wang, and H. Chen, \"Detoxifying large language models via knowledge editing,\" arXiv preprint arXiv:2403.14472, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 492, + 301, + 537 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 492, + 301, + 537 + ], + "spans": [ + { + "bbox": [ + 47, + 492, + 301, + 537 + ], + "type": "text", + "content": "[427] C. Zheng, L. Li, Q. Dong, Y. Fan, Z. Wu, J. Xu, and B. Chang, \"Can we edit factual knowledge by in-context learning?\" arXiv preprint arXiv:2305.12740, 2023." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 539, + 301, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 539, + 301, + 583 + ], + "spans": [ + { + "bbox": [ + 47, + 539, + 301, + 583 + ], + "type": "text", + "content": "[428] Y. Li, T. Li, K. Chen, J. Zhang, S. Liu, W. Wang, T. Zhang, and Y. Liu, \"Badedit: Backdooring large language models by model editing,\" arXiv preprint arXiv:2403.13355, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 585, + 301, + 630 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 585, + 301, + 630 + ], + "spans": [ + { + "bbox": [ + 47, + 585, + 301, + 630 + ], + "type": "text", + "content": "[429] K. Grimes, M. Christiani, D. Shriver, and M. Connor, \"Concept-rot: Poisoning concepts in large language models with model editing,\" arXiv preprint arXiv:2412.13341, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 632, + 301, + 676 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 632, + 301, + 676 + ], + "spans": [ + { + "bbox": [ + 47, + 632, + 301, + 676 + ], + "type": "text", + "content": "[430] X. Wu, J. Li, M. Xu, W. Dong, S. Wu, C. Bian, and D. Xiong, \"Depn: Detecting and editing privacy neurons in pretrained language models,\" arXiv preprint arXiv:2310.20138, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 677, + 301, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 301, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 301, + 712 + ], + "type": "text", + "content": "[431] X. Li, Z. Li, Y. Kosuga, Y. Yoshida, and V. Bian, \"Precision knowledge editing: Enhancing safety in large language models,\" arXiv preprint arXiv:2410.03772, 2024." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "type": "text", + "content": "[432] X. Hu, D. Li, B. Hu, Z. Zheng, Z. Liu, and M. Zhang, \"Separate the wheat from the chaff: Model deficiency unlearning via parameter-efficient module op" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "type": "text", + "content": "eration,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 16, 2024, pp. 18252-18260." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 66, + 564, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 66, + 564, + 110 + ], + "spans": [ + { + "bbox": [ + 310, + 66, + 564, + 110 + ], + "type": "text", + "content": "[433] T. Yang, L. Dai, Z. Liu, X. Wang, M. Jiang, Y. Tian, and X. Zhang, \"Cliperase: Efficient unlearning of visual-textual associations in clip,\" arXiv preprint arXiv:2410.23330, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 112, + 564, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 112, + 564, + 157 + ], + "spans": [ + { + "bbox": [ + 310, + 112, + 564, + 157 + ], + "type": "text", + "content": "[434] R. Gandikota, J. Materzynska, J. Fiotto-Kaufman, and D. Bau, \"Erasing concepts from diffusion models,\" 2023 IEEE/CVF International Conference on Computer Vision (ICCV), pp. 2426-2436, 2023." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 158, + 564, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 158, + 564, + 215 + ], + "spans": [ + { + "bbox": [ + 310, + 158, + 564, + 215 + ], + "type": "text", + "content": "[435] E. Zhang, K. Wang, X. Xu, Z. Wang, and H. Shi, \"Forget-me-not: Learning to forget in text-to-image diffusion models,\" 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pp. 1755-1764, 2023." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 216, + 564, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 216, + 564, + 271 + ], + "spans": [ + { + "bbox": [ + 310, + 216, + 564, + 271 + ], + "type": "text", + "content": "[436] C. Fan, J. Liu, Y. Zhang, D. Wei, E. Wong, and S. Liu, \"Salun: Empowering machine unlearning via gradient-based weight saliency in both image classification and generation,\" ArXiv, vol. abs/2310.12508, 2023." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 273, + 564, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 273, + 564, + 319 + ], + "spans": [ + { + "bbox": [ + 310, + 273, + 564, + 319 + ], + "type": "text", + "content": "[437] Z. Huang, X. Cheng, J. Zheng, H. Wang, Z. He, T. Li, and X. Huang, \"Unified gradient-based machine unlearning with remain geometry enhancement,\" ArXiv, vol. abs/2409.19732, 2024." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 319, + 564, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 319, + 564, + 365 + ], + "spans": [ + { + "bbox": [ + 310, + 319, + 564, + 365 + ], + "type": "text", + "content": "[438] A. Blanco-Justicia, J. Domingo-Ferrer, N. M. Jebreel, B. Manzanares-Salor, and D. Sánchez, \"Unlearning in large language models: We are not there yet,\" Computer, vol. 58, no. 1, pp. 97-100, 2025." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 366, + 564, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 366, + 564, + 423 + ], + "spans": [ + { + "bbox": [ + 310, + 366, + 564, + 423 + ], + "type": "text", + "content": "[439] S. Dai, C. Xu, S. Xu, L. Pang, Z. Dong, and J. Xu, \"Bias and unfairness in information retrieval systems: New challenges in the llm era,\" in Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, 2024, pp. 6437-6447." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 424, + 564, + 457 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 424, + 564, + 457 + ], + "spans": [ + { + "bbox": [ + 310, + 424, + 564, + 457 + ], + "type": "text", + "content": "[440] G. Nicolas and A. Caliskan, \"A taxonomy of stereotype content in large language models,\" arXiv preprint arXiv:2408.00162, 2024." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 458, + 564, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 458, + 564, + 503 + ], + "spans": [ + { + "bbox": [ + 310, + 458, + 564, + 503 + ], + "type": "text", + "content": "[441] S. Wang, R. Li, X. Chen, Y. Yuan, D. F. Wong, and M. Yang, \"Exploring the impact of personality traits on llm bias and toxicity,\" arXiv preprint arXiv:2502.12566, 2025." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 504, + 564, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 504, + 564, + 561 + ], + "spans": [ + { + "bbox": [ + 310, + 504, + 564, + 561 + ], + "type": "text", + "content": "[442] A. Liu, Q. Sheng, and X. Hu, \"Preventing and detecting misinformation generated by large language models,\" in Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval, 2024, pp. 3001-3004." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 562, + 564, + 607 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 562, + 564, + 607 + ], + "spans": [ + { + "bbox": [ + 310, + 562, + 564, + 607 + ], + "type": "text", + "content": "[443] Q. Zhang, H. Qiu, D. Wang, H. Qian, Y. Li, T. Zhang, and M. Huang, \"Understanding the dark side of lms' intrinsic self-correction,\" arXiv preprint arXiv:2412.14959, 2024." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 608, + 564, + 687 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 608, + 564, + 687 + ], + "spans": [ + { + "bbox": [ + 310, + 608, + 564, + 687 + ], + "type": "text", + "content": "[444] R. Xu, B. Lin, S. Yang, T. Zhang, W. Shi, T. Zhang, Z. Fang, W. Xu, and H. Qiu, \"The earth is flat because...: Investigating llms' belief towards misinformation via persuasive conversation,\" in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2024, pp. 16259-16303." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 689, + 564, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 689, + 564, + 723 + ], + "spans": [ + { + "bbox": [ + 310, + 689, + 564, + 723 + ], + "type": "text", + "content": "[445] Z. Liu, G. Dou, Z. Tan, Y. Tian, and M. Jiang, \"Machine unlearning in generative ai: A survey,\" arXiv preprint arXiv:2407.20516, 2024." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 723, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 723, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 723, + 564, + 746 + ], + "type": "text", + "content": "[446] Y. Qu, M. Ding, N. Sun, K. Thilakarathna, T. Zhu, and D. 
Niyato, \"The frontier of data erasure: Machine" + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "type": "text", + "content": "51" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 50 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "type": "text", + "content": "unlearning for large language models,\" arXiv preprint arXiv:2403.15779, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 66, + 301, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 66, + 301, + 123 + ], + "spans": [ + { + "bbox": [ + 47, + 66, + 301, + 123 + ], + "type": "text", + "content": "[447] A. Blanco-Justicia, N. Jebreel, B. Manzanares-Salor, D. Sánchez, J. Domingo-Ferrer, G. Collell, and K. Eeik Tan, \"Digital forgetting in large language models: A survey of unlearning methods,\" Artificial Intelligence Review, vol. 58, no. 3, p. 90, 2025." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 124, + 301, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 124, + 301, + 168 + ], + "spans": [ + { + "bbox": [ + 47, + 124, + 301, + 168 + ], + "type": "text", + "content": "[448] N. Li, C. Zhou, Y. Gao, H. Chen, Z. Zhang, B. Kuang, and A. Fu, \"Machine unlearning: Taxonomy, metrics, applications, challenges, and prospects,\" IEEE Transactions on Neural Networks and Learning Systems, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 170, + 301, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 170, + 301, + 203 + ], + "spans": [ + { + "bbox": [ + 47, + 170, + 301, + 203 + ], + "type": "text", + "content": "[449] C. Gao, L. Wang, C. Weng, X. Wang, and Q. Zhu, \"Practical unlearning for large language models,\" arXiv preprint arXiv:2407.10223, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 205, + 301, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 205, + 301, + 250 + ], + "spans": [ + { + "bbox": [ + 47, + 205, + 301, + 250 + ], + "type": "text", + "content": "[450] P. Thaker, S. Hu, N. Kale, Y. Maurya, Z. S. Wu, and V. Smith, \"Position: Llm unlearning benchmarks are weak measures of progress,\" arXiv preprint arXiv:2410.02879, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 251, + 301, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 251, + 301, + 306 + ], + "spans": [ + { + "bbox": [ + 47, + 251, + 301, + 306 + ], + "type": "text", + "content": "[451] K. Zhao, M. Kurmanji, G.-O. Bärbulescu, E. Triantafillou, and P. Triantafillou, \"What makes unlearning hard and what to do about it,\" Advances in Neural Information Processing Systems, vol. 37, pp. 12293-12333, 2025." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 308, + 301, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 308, + 301, + 354 + ], + "spans": [ + { + "bbox": [ + 47, + 308, + 301, + 354 + ], + "type": "text", + "content": "[452] W. Wang, M. Zhang, X. Ye, Z. Ren, Z. Chen, and P. Ren, \"Uipe: Enhancing llm unlearning by removing knowledge related to forgetting targets,\" arXiv preprint arXiv:2503.04693, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 354, + 301, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 354, + 301, + 400 + ], + "spans": [ + { + "bbox": [ + 47, + 354, + 301, + 400 + ], + "type": "text", + "content": "[453] H. Wang, Y. Jing, H. Sun, Y. Wang, J. Wang, J. Liao, and D. Tao, \"Erasing without remembering: Safeguarding knowledge forgetting in large language models,\" arXiv preprint arXiv:2502.19982, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 401, + 301, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 401, + 301, + 446 + ], + "spans": [ + { + "bbox": [ + 47, + 401, + 301, + 446 + ], + "type": "text", + "content": "[454] T. Tran, R. Liu, and L. Xiong, \"Tokens for learning, tokens for unlearning: Mitigating membership inference attacks in large language models via dual-purpose training,\" arXiv preprint arXiv:2502.19726, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 447, + 301, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 447, + 301, + 492 + ], + "spans": [ + { + "bbox": [ + 47, + 447, + 301, + 492 + ], + "type": "text", + "content": "[455] H. Xu, N. Zhao, L. Yang, S. Zhao, S. Deng, M. Wang, B. Hooi, N. Oo, H. Chen, and N. Zhang, \"Relearn: Unlearning via learning for large language models,\" arXiv preprint arXiv:2502.11190, 2025." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 493, + 301, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 493, + 301, + 538 + ], + "spans": [ + { + "bbox": [ + 47, + 493, + 301, + 538 + ], + "type": "text", + "content": "[456] Q. Zhang, H. Qiu, D. Wang, Y. Li, T. Zhang, W. Zhu, H. Weng, L. Yan, and C. Zhang, \"Large scale knowledge washing,\" in The Thirteenth International Conference on Learning Representations, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 539, + 301, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 539, + 301, + 585 + ], + "spans": [ + { + "bbox": [ + 47, + 539, + 301, + 585 + ], + "type": "text", + "content": "[457] A. Thudi, H. Jia, I. Shumailov, and N. Papernot, \"On the necessity of auditable algorithmic definitions for machine unlearning,\" in 31st USENIX security symposium (USENIX Security 22), 2022, pp. 4007-4022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 586, + 301, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 586, + 301, + 619 + ], + "spans": [ + { + "bbox": [ + 47, + 586, + 301, + 619 + ], + "type": "text", + "content": "[458] S. Goel, A. Prabhu, P. Torr, P. Kumaraguru, and A. Sanyal, \"Corrective machine unlearning,\" Transactions on Machine Learning Research." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 620, + 301, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 620, + 301, + 677 + ], + "spans": [ + { + "bbox": [ + 47, + 620, + 301, + 677 + ], + "type": "text", + "content": "[459] A. Thudi, G. Deza, V. Chandrasekaran, and N. Papernot, \"Unrolling sgd: Understanding factors influencing machine unlearning,\" in 2022 IEEE 7th European Symposium on Security and Privacy (EuroS&P). IEEE, 2022, pp. 303-319." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 677, + 301, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 301, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 301, + 712 + ], + "type": "text", + "content": "[460] B. Liu, Q. Liu, and P. Stone, \"Continual learning and private unlearning,\" in Conference on Lifelong Learning Agents. PMLR, 2022, pp. 243-254." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 712, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 712, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 712, + 301, + 746 + ], + "type": "text", + "content": "[461] Q. P. Nguyen, B. K. H. Low, and P. Jaillet, \"Variational bayesian unlearning,\" Advances in Neural Information Processing Systems, vol. 33, pp. 16025-16036, 2020." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 310, + 42, + 564, + 88 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 42, + 564, + 88 + ], + "spans": [ + { + "bbox": [ + 310, + 42, + 564, + 88 + ], + "type": "text", + "content": "[462] L. Wang, T. Chen, W. Yuan, X. Zeng, K.-F. Wong, and H. Yin, \"Kga: A general machine unlearning framework based on knowledge gap alignment,\" arXiv preprint arXiv:2305.06535, 2023." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 89, + 564, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 89, + 564, + 133 + ], + "spans": [ + { + "bbox": [ + 310, + 89, + 564, + 133 + ], + "type": "text", + "content": "[463] Y. Liu, Y. Zhang, T. Jaakkola, and S. Chang, \"Revisiting who's harry potter: Towards targeted unlearning from a causal intervention perspective,\" arXiv preprint arXiv:2407.16997, 2024." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 135, + 564, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 135, + 564, + 168 + ], + "spans": [ + { + "bbox": [ + 310, + 135, + 564, + 168 + ], + "type": "text", + "content": "[464] P. Maini, Z. Feng, A. Schwarzschild, Z. C. Lipton, and J. Z. Kolter, \"Tofu: A task of fictitious unlearning for llms,\" arXiv preprint arXiv:2401.06121, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 170, + 564, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 170, + 564, + 214 + ], + "spans": [ + { + "bbox": [ + 310, + 170, + 564, + 214 + ], + "type": "text", + "content": "[465] R. Zhang, L. Lin, Y. Bai, and S. Mei, \"Negative preference optimization: From catastrophic collapse to effective unlearning,\" arXiv preprint arXiv:2404.05868, 2024." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 216, + 564, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 216, + 564, + 272 + ], + "spans": [ + { + "bbox": [ + 310, + 216, + 564, + 272 + ], + "type": "text", + "content": "[466] R. Rafailov, A. Sharma, E. Mitchell, C. D. Manning, S. Ermon, and C. Finn, \"Direct preference optimization: Your language model is secretly a reward model,\" Advances in Neural Information Processing Systems, vol. 36, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 274, + 564, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 274, + 564, + 329 + ], + "spans": [ + { + "bbox": [ + 310, + 274, + 564, + 329 + ], + "type": "text", + "content": "[467] J. Huo, Y. Yan, X. Zheng, Y. Lyu, X. Zou, Z. Wei, and X. Hu, \"Mmunlearner: Reformulating multimodal machine unlearning in the era of multimodal large language models,\" arXiv preprint arXiv:2502.11051, 2025." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 331, + 564, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 331, + 564, + 376 + ], + "spans": [ + { + "bbox": [ + 310, + 331, + 564, + 376 + ], + "type": "text", + "content": "[468] J. Li, Q. Wei, C. Zhang, G. Qi, M. Du, Y. Chen, and S. Bi, \"Single image unlearning: Efficient machine unlearning in multimodal large language models,\" arXiv preprint arXiv:2405.12523, 2024." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 377, + 564, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 377, + 564, + 434 + ], + "spans": [ + { + "bbox": [ + 310, + 377, + 564, + 434 + ], + "type": "text", + "content": "[469] S. Xing, F. Zhao, Z. Wu, T. An, W. Chen, C. Li, J. Zhang, and X. Dai, \"Efuf: Efficient fine-grained unlearning framework for mitigating hallucinations in multimodal large language models,\" ArXiv, vol. abs/2402.09801, 2024." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 435, + 564, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 435, + 564, + 491 + ], + "spans": [ + { + "bbox": [ + 310, + 435, + 564, + 491 + ], + "type": "text", + "content": "[470] T. Chakraborty, E. Shayegani, Z. Cai, N. B. Abu-Ghazaleh, M. S. Asif, Y. Dong, A. K. Roy-Chowdhury, and C. Song, \"Cross-modal safety alignment: Is textual unlearning all you need?\" ArXiv, vol. abs/2406.02575, 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 492, + 564, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 492, + 564, + 550 + ], + "spans": [ + { + "bbox": [ + 310, + 492, + 564, + 550 + ], + "type": "text", + "content": "[471] J. Chen, Z. Deng, K. Zheng, Y. Yan, S. Liu, P. Wu, P. Jiang, J. Liu, and X. Hu, \"Safeeraser: Enhancing safety in multimodal large language models through multimodal machine unlearning,\" arXiv preprint arXiv:2502.12520, 2025." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 551, + 564, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 551, + 564, + 596 + ], + "spans": [ + { + "bbox": [ + 310, + 551, + 564, + 596 + ], + "type": "text", + "content": "[472] G. Ilharco, M. T. Ribeiro, M. Wortsman, S. Gururangan, L. Schmidt, H. Hajishirzi, and A. Farhadi, \"Editing models with task arithmetic,\" arXiv preprint arXiv:2212.04089, 2022." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 597, + 564, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 597, + 564, + 631 + ], + "spans": [ + { + "bbox": [ + 310, + 597, + 564, + 631 + ], + "type": "text", + "content": "[473] D. Jung, J. Seo, J. Lee, C. Park, and H. Lim, \"Come: An unlearning-based approach to conflict-free model editing,\" arXiv preprint arXiv:2502.15826, 2025." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 632, + 564, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 632, + 564, + 677 + ], + "spans": [ + { + "bbox": [ + 310, + 632, + 564, + 677 + ], + "type": "text", + "content": "[474] B. Zhang, Z. Chen, Z. Zheng, J. Li, and H. Chen, \"Resolving editing-unlearning conflicts: A knowledge codebook framework for large language model updating,\" arXiv preprint arXiv:2502.00158, 2025." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 677, + 564, + 711 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 677, + 564, + 711 + ], + "spans": [ + { + "bbox": [ + 310, + 677, + 564, + 711 + ], + "type": "text", + "content": "[475] R. Eldan and M. Russinovich, \"Who's harry potter? approximate unlearning in llms,\" arXiv preprint arXiv:2310.02238, 2023." 
+ } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 712, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 712, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 712, + 564, + 746 + ], + "type": "text", + "content": "[476] N. Li, A. Pan, A. Gopal, S. Yue, D. Berrios, A. Gatti, J. D. Li, A.-K. Dombrowski, S. Goel, L. Phan et al., \"The wmdp benchmark: Measuring and re" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "52" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 51 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "type": "text", + "content": "ducing malicious use with unlearning,\" arXiv preprint arXiv:2403.03218, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 66, + 301, + 100 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 66, + 301, + 100 + ], + "spans": [ + { + "bbox": [ + 47, + 66, + 301, + 100 + ], + "type": "text", + "content": "[477] M. Pawelczyk, S. Neel, and H. 
Lakkaraju, \"In-context unlearning: Language models as few shot unlearners,\" arXiv preprint arXiv:2310.07579, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 100, + 301, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 100, + 301, + 134 + ], + "spans": [ + { + "bbox": [ + 47, + 100, + 301, + 134 + ], + "type": "text", + "content": "[478] P. Thaker, Y. Maurya, S. Hu, Z. S. Wu, and V. Smith, \"Guardrail baselines for unlearning in llms,\" arXiv preprint arXiv:2403.03329, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 134, + 301, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 134, + 301, + 180 + ], + "spans": [ + { + "bbox": [ + 47, + 134, + 301, + 180 + ], + "type": "text", + "content": "[479] J. Ren, Z. Dai, X. Tang, H. Liu, J. Zeng, Z. Li, R. Goutam, S. Wang, Y. Xing, and Q. He, \"A general framework to enhance fine-tuning-based llm unlearning,\" arXiv preprint arXiv:2502.17823, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 180, + 301, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 180, + 301, + 227 + ], + "spans": [ + { + "bbox": [ + 47, + 180, + 301, + 227 + ], + "type": "text", + "content": "[480] X. Zhao, W. Cai, T. Shi, D. Huang, L. Lin, S. Mei, and D. Song, \"Improving llm safety alignment with dual-objective optimization,\" arXiv preprint arXiv:2503.03710, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 227, + 301, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 227, + 301, + 284 + ], + "spans": [ + { + "bbox": [ + 47, + 227, + 301, + 284 + ], + "type": "text", + "content": "[481] S. Takashiro, T. Kojima, A. Gambardella, Q. Cao, Y. Iwasawa, and Y. Matsuo, \"Answer when needed, forget when not: Language models pretend to forget via in-context knowledge unlearning,\" arXiv preprint arXiv:2410.00382, 2024." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 285, + 301, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 285, + 301, + 319 + ], + "spans": [ + { + "bbox": [ + 47, + 285, + 301, + 319 + ], + "type": "text", + "content": "[482] A. Muresanu, A. Thudi, M. R. Zhang, and N. Papernot, \"Unlearnable algorithms for in-context learning,\" arXiv preprint arXiv:2402.00751, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 319, + 301, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 319, + 301, + 354 + ], + "spans": [ + { + "bbox": [ + 47, + 319, + 301, + 354 + ], + "type": "text", + "content": "[483] Y. Zhou, X. Li, Q. Wang, and J. Shen, \"Visual in-context learning for large vision-language models,\" arXiv preprint arXiv:2402.11574, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 354, + 301, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 354, + 301, + 399 + ], + "spans": [ + { + "bbox": [ + 47, + 354, + 301, + 399 + ], + "type": "text", + "content": "[484] Z. Liu, G. Dou, X. Yuan, C. Zhang, Z. Tan, and M. Jiang, \"Modality-aware neuron pruning for unlearning in multimodal large language models,\" arXiv preprint arXiv:2502.15910, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 400, + 301, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 400, + 301, + 446 + ], + "spans": [ + { + "bbox": [ + 47, + 400, + 301, + 446 + ], + "type": "text", + "content": "[485] N. Yang, M. Kim, S. Yoon, J. Shin, and K. Jung, \"Faithun: Toward faithful forgetting in language models by investigating the interconnectedness of knowledge,\" arXiv preprint arXiv:2502.19207, 2025." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 447, + 301, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 447, + 301, + 492 + ], + "spans": [ + { + "bbox": [ + 47, + 447, + 301, + 492 + ], + "type": "text", + "content": "[486] A. Ramakrishna, Y. Wan, X. Jin, K.-W. Chang, Z. Bu, B. Vinzamuri, V. Cevher, M. Hong, and R. Gupta, \"Lume: Llm unlearning with multitask evaluations,\" arXiv preprint arXiv:2502.15097, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 492, + 301, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 492, + 301, + 550 + ], + "spans": [ + { + "bbox": [ + 47, + 492, + 301, + 550 + ], + "type": "text", + "content": "[487] Y. Lang, K. Guo, Y. Huang, Y. Zhou, H. Zhuang, T. Yang, Y. Su, and X. Zhang, \"Beyond single-value metrics: Evaluating and enhancing llm unlearning with cognitive diagnosis,\" arXiv preprint arXiv:2502.13996, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 550, + 301, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 550, + 301, + 596 + ], + "spans": [ + { + "bbox": [ + 47, + 550, + 301, + 596 + ], + "type": "text", + "content": "[488] Q. Wang, J. P. Zhou, Z. Zhou, S. Shin, B. Han, and K. Q. Weinberger, \"Rethinking llm unlearning objectives: A gradient perspective and go beyond,\" arXiv preprint arXiv:2502.19301, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 597, + 301, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 597, + 301, + 642 + ], + "spans": [ + { + "bbox": [ + 47, + 597, + 301, + 642 + ], + "type": "text", + "content": "[489] M. Khoriaty, A. Shportko, G. Mercier, and Z. Wood-Doughty, \"Don't forget it! conditional sparse autoencoder clamping works for unlearning,\" arXiv preprint arXiv:2503.11127, 2025." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 643, + 301, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 643, + 301, + 677 + ], + "spans": [ + { + "bbox": [ + 47, + 643, + 301, + 677 + ], + "type": "text", + "content": "[490] J. Cheng and H. Amiri, \"Mu-bench: A multitask multimodal benchmark for machine unlearning,\" arXiv preprint arXiv:2406.14796, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 677, + 301, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 301, + 723 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 301, + 723 + ], + "type": "text", + "content": "[491] V. Patil, Y.-L. Sung, P. Hase, J. Peng, T. Chen, and M. Bansal, \"Unlearning sensitive information in multimodal llms: Benchmark and attack-defense evaluation,\" Transactions on Machine Learning Research." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 723, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 723, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 723, + 301, + 746 + ], + "type": "text", + "content": "[492] Y. Ma, J. Wang, F. Wang, S. Ma, J. Li, X. Li, F. Huang, L. Sun, B. Li, Y. Choi et al., \"Benchmarking vision lan" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "type": "text", + "content": "guage model unlearning via fictitious facial identity dataset,\" arXiv preprint arXiv:2411.03554, 2024." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 65, + 564, + 111 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 65, + 564, + 111 + ], + "spans": [ + { + "bbox": [ + 310, + 65, + 564, + 111 + ], + "type": "text", + "content": "[493] S. Moon, M. Lee, S. Park, and D. Kim, “Holistic unlearning benchmark: A multi-faceted evaluation for text-to-image diffusion model unlearning,” arXiv preprint arXiv:2410.05664, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 111, + 564, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 111, + 564, + 134 + ], + "spans": [ + { + "bbox": [ + 310, + 111, + 564, + 134 + ], + "type": "text", + "content": "[494] D. Sanyal and M. Mandal, \"Alu: Agentic llm unlearning,\" arXiv preprint arXiv:2502.00406, 2025." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 134, + 564, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 134, + 564, + 168 + ], + "spans": [ + { + "bbox": [ + 310, + 134, + 564, + 168 + ], + "type": "text", + "content": "[495] J. Cheng and H. Amiri, \"Tool unlearning for tool-augmented llms,\" arXiv preprint arXiv:2502.01083, 2025." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 168, + 564, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 168, + 564, + 215 + ], + "spans": [ + { + "bbox": [ + 310, + 168, + 564, + 215 + ], + "type": "text", + "content": "[496] H. Liu, P. Xiong, T. Zhu, and S. Y. Philip, \"A survey on machine unlearning: Techniques and new emerged privacy risks,\" Journal of Information Security and Applications, vol. 90, p. 104010, 2025." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 215, + 564, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 215, + 564, + 261 + ], + "spans": [ + { + "bbox": [ + 310, + 215, + 564, + 261 + ], + "type": "text", + "content": "[497] S. 
Qureshi, T. Shaik, X. Tao, H. Xie, L. Li, J. Yong, and X. Jia, \"Exploring incremental unlearning: Techniques, challenges, and future directions,\" arXiv preprint arXiv:2502.16708, 2025." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 261, + 564, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 261, + 564, + 319 + ], + "spans": [ + { + "bbox": [ + 310, + 261, + 564, + 319 + ], + "type": "text", + "content": "[498] J. Geng, Q. Li, H. Woisetschlaeger, Z. Chen, Y. Wang, P. Nakov, H.-A. Jacobsen, and F. Karray, \"A comprehensive survey of machine unlearning techniques for large language models,\" arXiv preprint arXiv:2503.01854, 2025." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 319, + 564, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 319, + 564, + 388 + ], + "spans": [ + { + "bbox": [ + 310, + 319, + 564, + 388 + ], + "type": "text", + "content": "[499] X. He, C. Chen, L. Lyu, and Q. Xu, \"Extracted bert model leaks more information than you think!\" in Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, EMNLP 2022. Association for Computational Linguistics, 2022, pp. 1530-1537." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 388, + 564, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 388, + 564, + 445 + ], + "spans": [ + { + "bbox": [ + 310, + 388, + 564, + 445 + ], + "type": "text", + "content": "[500] X. He, Q. Xu, L. Lyu, F. Wu, and C. Wang, \"Protecting intellectual property of language generation apis with lexical watermark,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 36, no. 10, 2022, pp. 10758-10766." 
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 446, + 564, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 446, + 564, + 503 + ], + "spans": [ + { + "bbox": [ + 310, + 446, + 564, + 503 + ], + "type": "text", + "content": "[501] X. He, Q. Xu, Y. Zeng, L. Lyu, F. Wu, J. Li, and R. Jia, \"Cater: Intellectual property protection on text generation apis via conditional watermarks,\" Advances in Neural Information Processing Systems, vol. 35, pp. 5431-5445, 2022." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 503, + 564, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 503, + 564, + 584 + ], + "spans": [ + { + "bbox": [ + 310, + 503, + 564, + 584 + ], + "type": "text", + "content": "[502] W. Peng, J. Yi, F. Wu, S. Wu, B. B. Zhu, L. Lyu, B. Jiao, T. Xu, G. Sun, and X. Xie, \"Are you copying my model? protecting the copyright of large language models for eaas via backdoor watermark,\" in Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2023, pp. 7653-7668." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 585, + 564, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 585, + 564, + 632 + ], + "spans": [ + { + "bbox": [ + 310, + 585, + 564, + 632 + ], + "type": "text", + "content": "[503] N. Carlini, D. Paleka, K. D. Dvijotham, T. Steinke, J. Hayase, A. F. Cooper, K. Lee, M. Jagielski, M. Nasr, A. Conmy et al., \"Stealing part of a production language model,\" arXiv preprint arXiv:2403.06634, 2024." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 632, + 564, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 632, + 564, + 666 + ], + "spans": [ + { + "bbox": [ + 310, + 632, + 564, + 666 + ], + "type": "text", + "content": "[504] M. Finlayson, X. Ren, and S. 
Swayamdipta, \"Logits of api-protected llms leak proprietary information,\" arXiv preprint arXiv:2403.09539, 2024." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 666, + 564, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 666, + 564, + 712 + ], + "spans": [ + { + "bbox": [ + 310, + 666, + 564, + 712 + ], + "type": "text", + "content": "[505] S. Zanella-Beguelin, S. Tople, A. Paverd, and B. Köpf, \"Grey-box extraction of natural language models,\" in International Conference on Machine Learning. PMLR, 2021, pp. 12278-12286." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 712, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 712, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 712, + 564, + 746 + ], + "type": "text", + "content": "[506] E. Horwitz, J. Kahana, and Y. Hoshen, \"Recovering the pre-fine-tuning weights of generative models,\" arXiv preprint arXiv:2402.10208, 2024." + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "53" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 52 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 100 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 42, + 301, + 100 + ], + "spans": [ + { + "bbox": [ + 47, + 42, + 301, + 100 + ], + "type": "text", + "content": "[507] Z. Li, C. Wang, P. Ma, C. Liu, S. Wang, D. Wu, C. Gao, and Y. Liu, \"On extracting specialized code abilities from large language models: A feasibility study,\" in Proceedings of the IEEE/ACM 46th International Conference on Software Engineering, 2024, pp. 1-13." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 100, + 301, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 100, + 301, + 134 + ], + "spans": [ + { + "bbox": [ + 47, + 100, + 301, + 134 + ], + "type": "text", + "content": "[508] A. Liu and A. Moitra, \"Model stealing for any low-rank language model,\" arXiv preprint arXiv:2411.07536, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 134, + 301, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 134, + 301, + 179 + ], + "spans": [ + { + "bbox": [ + 47, + 134, + 301, + 179 + ], + "type": "text", + "content": "[509] W. Shi, A. Ajith, M. Xia, Y. Huang, D. Liu, T. Blevins, D. Chen, and L. Zettlemoyer, \"Detecting pretraining data from large language models,\" arXiv preprint arXiv:2310.16789, 2023." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 180, + 301, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 180, + 301, + 227 + ], + "spans": [ + { + "bbox": [ + 47, + 180, + 301, + 227 + ], + "type": "text", + "content": "[510] J. Zhang, J. Sun, E. Yeats, Y. Ouyang, M. Kuo, J. Zhang, H. F. Yang, and H. Li, \"Min-" + }, + { + "bbox": [ + 47, + 180, + 301, + 227 + ], + "type": "inline_equation", + "content": "k\\%" + }, + { + "bbox": [ + 47, + 180, + 301, + 227 + ], + "type": "text", + "content": "++: Improved baseline for detecting pre-training data from large language models,\" arXiv preprint arXiv:2404.02936, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 227, + 301, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 227, + 301, + 261 + ], + "spans": [ + { + "bbox": [ + 47, + 227, + 301, + 261 + ], + "type": "text", + "content": "[511] D. Das, J. Zhang, and F. Tramér, \"Blind baselines beat membership inference attacks for foundation models,\" arXiv preprint arXiv:2406.16201, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 261, + 301, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 261, + 301, + 308 + ], + "spans": [ + { + "bbox": [ + 47, + 261, + 301, + 308 + ], + "type": "text", + "content": "[512] P. Maini, H. Jia, N. Papernot, and A. Dziedzic, \"Llm dataset inference: Did you train on my dataset?\" Advances in Neural Information Processing Systems, vol. 37, pp. 124069-124092, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 308, + 301, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 308, + 301, + 353 + ], + "spans": [ + { + "bbox": [ + 47, + 308, + 301, + 353 + ], + "type": "text", + "content": "[513] A. V. Duarte, X. Zhao, A. L. Oliveira, and L. 
Li, \"De-cop: Detecting copyrighted content in language models training data,\" arXiv preprint arXiv:2402.09910, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 353, + 301, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 353, + 301, + 399 + ], + "spans": [ + { + "bbox": [ + 47, + 353, + 301, + 399 + ], + "type": "text", + "content": "[514] R. Xie, J. Wang, R. Huang, M. Zhang, R. Ge, J. Pei, N. Z. Gong, and B. Dhingra, \"Recall: Membership inference via relative conditional log-likelihoods,\" arXiv preprint arXiv:2406.15968, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 399, + 301, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 399, + 301, + 435 + ], + "spans": [ + { + "bbox": [ + 47, + 399, + 301, + 435 + ], + "type": "text", + "content": "[515] F. Galli, L. Melis, and T. Cucinotta, \"Noisy neighbors: Efficient membership inference attacks against llms,\" arXiv preprint arXiv:2406.16565, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 435, + 301, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 435, + 301, + 469 + ], + "spans": [ + { + "bbox": [ + 47, + 435, + 301, + 469 + ], + "type": "text", + "content": "[516] H. Mozaffari and V. J. Marathe, \"Semantic membership inference attack against large language models,\" arXiv preprint arXiv:2406.10218, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 469, + 301, + 527 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 469, + 301, + 527 + ], + "spans": [ + { + "bbox": [ + 47, + 469, + 301, + 527 + ], + "type": "text", + "content": "[517] M. Meeus, S. Jain, M. Rei, and Y.-A. de Montjoye, \"Did the neurons read your book? document-level membership inference for large language models,\" in 33rd USENIX Security Symposium (USENIX Security 24), 2024, pp. 2369-2385." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 527, + 301, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 527, + 301, + 561 + ], + "spans": [ + { + "bbox": [ + 47, + 527, + 301, + 561 + ], + "type": "text", + "content": "[518] M. Meeus, I. Shilov, M. Faysse, and Y.-A. De Montjoye, \"Copyright traps for large language models,\" arXiv preprint arXiv:2402.09363, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 561, + 301, + 607 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 561, + 301, + 607 + ], + "spans": [ + { + "bbox": [ + 47, + 561, + 301, + 607 + ], + "type": "text", + "content": "[519] H. Puerto, M. Gubri, S. Yun, and S. J. Oh, \"Scaling up membership inference: When and how attacks succeed on large language models,\" arXiv preprint arXiv:2411.00154, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 607, + 301, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 607, + 301, + 654 + ], + "spans": [ + { + "bbox": [ + 47, + 607, + 301, + 654 + ], + "type": "text", + "content": "[520] M. Anderson, G. Amit, and A. Goldsteen, “Is my data in your retrieval database? membership inference attacks against retrieval augmented generation,” arXiv preprint arXiv:2405.20446, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 654, + 301, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 654, + 301, + 700 + ], + "spans": [ + { + "bbox": [ + 47, + 654, + 301, + 700 + ], + "type": "text", + "content": "[521] Y. Li, G. Liu, C. Wang, and Y. Yang, \"Generating is believing: Membership inference attacks against retrieval-augmented generation,\" arXiv preprint arXiv:2406.19234, 2024." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 700, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 700, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 700, + 301, + 746 + ], + "type": "text", + "content": "[522] R. Wen, Z. Li, M. Backes, and Y. Zhang, \"Membership inference attacks against in-context learning,\" in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 3481-" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 43, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 335, + 43, + 358, + 53 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 43, + 358, + 53 + ], + "spans": [ + { + "bbox": [ + 335, + 43, + 358, + 53 + ], + "type": "text", + "content": "3495." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 53, + 564, + 87 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 53, + 564, + 87 + ], + "spans": [ + { + "bbox": [ + 310, + 53, + 564, + 87 + ], + "type": "text", + "content": "[523] H. Duan, A. Dziedzic, M. Yaghini, N. Papernot, and F. Boenisch, \"On the privacy risk of in-context learning,\" arXiv preprint arXiv:2411.10512, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 88, + 564, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 88, + 564, + 134 + ], + "spans": [ + { + "bbox": [ + 310, + 88, + 564, + 134 + ], + "type": "text", + "content": "[524] Y. Wen, L. Marchyok, S. Hong, J. Geiping, T. Goldstein, and N. Carlini, \"Privacy backdoors: Enhancing membership inference through poisoning pre-trained models,\" arXiv preprint arXiv:2404.01231, 2024." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 134, + 564, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 134, + 564, + 180 + ], + "spans": [ + { + "bbox": [ + 310, + 134, + 564, + 180 + ], + "type": "text", + "content": "[525] R. Wen, T. Wang, M. Backes, Y. Zhang, and A. Salem, \"Last one standing: A comparative analysis of security and privacy of soft prompt tuning, lora, and in-context learning,\" arXiv preprint arXiv:2310.11397, 2023." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 180, + 564, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 180, + 564, + 226 + ], + "spans": [ + { + "bbox": [ + 310, + 180, + 564, + 226 + ], + "type": "text", + "content": "[526] S. Balloccu, P. Schmidtová, M. Lango, and O. Dusek, \"Leak, cheat, repeat: Data contamination and evaluation malpractices in closed-source llms,\" arXiv preprint arXiv:2402.03927, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 227, + 564, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 227, + 564, + 285 + ], + "spans": [ + { + "bbox": [ + 310, + 227, + 564, + 285 + ], + "type": "text", + "content": "[527] W. Fu, H. Wang, C. Gao, G. Liu, Y. Li, and T. Jiang, \"Membership inference attacks against fine-tuned large language models via self-prompt calibration,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 285, + 564, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 285, + 564, + 331 + ], + "spans": [ + { + "bbox": [ + 310, + 285, + 564, + 331 + ], + "type": "text", + "content": "[528] H. Li, G. Deng, Y. Liu, K. Wang, Y. Li, T. Zhang, Y. Liu, G. Xu, G. Xu, and H. Wang, \"Digger: Detecting copyright content mis-usage in large language model training,\" arXiv preprint arXiv:2401.00676, 2024." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 331, + 564, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 331, + 564, + 376 + ], + "spans": [ + { + "bbox": [ + 310, + 331, + 564, + 376 + ], + "type": "text", + "content": "[529] A. Naseh and N. Mireshghallah, \"Synthetic data can mislead evaluations: Membership inference as machine text detection,\" arXiv preprint arXiv:2501.11786, 2025." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 376, + 564, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 376, + 564, + 423 + ], + "spans": [ + { + "bbox": [ + 310, + 376, + 564, + 423 + ], + "type": "text", + "content": "[530] Z. Liao and H. Sun, \"Amplegcg: Learning a universal and transferable generative model of adversarial suffixes for jailbreaking both open and closed llms,\" arXiv preprint arXiv:2404.07921, 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 423, + 564, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 423, + 564, + 469 + ], + "spans": [ + { + "bbox": [ + 310, + 423, + 564, + 469 + ], + "type": "text", + "content": "[531] X. Jia, T. Pang, C. Du, Y. Huang, J. Gu, Y. Liu, X. Cao, and M. Lin, \"Improved techniques for optimization-based jailbreaking on large language models,\" arXiv preprint arXiv:2405.21018, 2024." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 469, + 564, + 516 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 469, + 564, + 516 + ], + "spans": [ + { + "bbox": [ + 310, + 469, + 564, + 516 + ], + "type": "text", + "content": "[532] Y. Zhang and Z. Wei, \"Boosting jailbreak attack with momentum,\" in ICASSP 2025-2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2025, pp. 1-5." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 516, + 564, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 516, + 564, + 584 + ], + "spans": [ + { + "bbox": [ + 310, + 516, + 564, + 584 + ], + "type": "text", + "content": "[533] Y. Zhao, W. Zheng, T. Cai, D. Xuan Long, K. Kawaguchi, A. Goyal, and M. Q. Shieh, \"Accelerating greedy coordinate gradient and general prompt optimization via probe sampling,\" Advances in Neural Information Processing Systems, vol. 37, pp. 53710-53731, 2024." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 584, + 564, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 584, + 564, + 620 + ], + "spans": [ + { + "bbox": [ + 310, + 584, + 564, + 620 + ], + "type": "text", + "content": "[534] X. Liu, N. Xu, M. Chen, and C. Xiao, \"Autodan: Generating stealthy jailbreak prompts on aligned large language models,\" arXiv preprint arXiv:2310.04451, 2023." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 620, + 564, + 675 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 620, + 564, + 675 + ], + "spans": [ + { + "bbox": [ + 310, + 620, + 564, + 675 + ], + "type": "text", + "content": "[535] S. Zhu, R. Zhang, B. An, G. Wu, J. Barrow, Z. Wang, F. Huang, A. Nenkova, and T. Sun, \"Autodan: interpretable gradient-based adversarial attacks on large language models,\" arXiv preprint arXiv:2310.15140, 2023." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 675, + 564, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 675, + 564, + 734 + ], + "spans": [ + { + "bbox": [ + 310, + 675, + 564, + 734 + ], + "type": "text", + "content": "[536] A. Mehrotra, M. Zampetakis, P. Kassianik, B. Nelson, H. Anderson, Y. Singer, and A. Karbasi, \"Tree of attacks: Jailbreaking black-box llms automatically,\" Advances in Neural Information Processing Systems, vol. 37, pp. 61-65, 2024." 
+ } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 734, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 734, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 734, + 564, + 746 + ], + "type": "text", + "content": "[537] C. Sitawarin, N. Mu, D. Wagner, and A. Araujo," + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "54" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 53 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 72, + 42, + 299, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 42, + 299, + 65 + ], + "spans": [ + { + "bbox": [ + 72, + 42, + 299, + 65 + ], + "type": "text", + "content": "\"Pal: Proxy-guided black-box attack on large language models,\" arXiv preprint arXiv:2402.09674, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 66, + 301, + 111 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 66, + 301, + 111 + ], + "spans": [ + { + "bbox": [ + 47, + 66, + 301, + 111 + ], + "type": "text", + "content": "[538] G. Deng, Y. Liu, Y. Li, K. Wang, Y. Zhang, Z. Li, H. Wang, T. Zhang, and Y. 
Liu, \"Masterkey: Automated jailbreak across multiple large language model chatbots,\" arXiv preprint arXiv:2307.08715, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 112, + 301, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 112, + 301, + 158 + ], + "spans": [ + { + "bbox": [ + 47, + 112, + 301, + 158 + ], + "type": "text", + "content": "[539] X. Liu, P. Li, E. Suh, Y. Vorobeychik, Z. Mao, S. Jha, P. McDaniel, H. Sun, B. Li, and C. Xiao, \"Autodanturbo: A lifelong agent for strategy self-exploration to jailbreak llms,\" arXiv preprint arXiv:2410.05295, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 158, + 301, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 158, + 301, + 191 + ], + "spans": [ + { + "bbox": [ + 47, + 158, + 301, + 191 + ], + "type": "text", + "content": "[540] Y. Liu, X. He, M. Xiong, J. Fu, S. Deng, and B. Hooi, \"Flipattack: Jailbreak llms via flipping,\" arXiv preprint arXiv:2410.02832, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 192, + 301, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 192, + 301, + 239 + ], + "spans": [ + { + "bbox": [ + 47, + 192, + 301, + 239 + ], + "type": "text", + "content": "[541] T. Wu, Z. Xue, Y. Liu, J. Zhang, B. Hooi, and S.-K. Ng, \"Geneshift: Impact of different scenario shift on jailbreaking llm,\" 2025. [Online]. Available: https://arxiv.org/abs/2504.08104" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 239, + 301, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 239, + 301, + 273 + ], + "spans": [ + { + "bbox": [ + 47, + 239, + 301, + 273 + ], + "type": "text", + "content": "[542] F. Perez and I. Ribeiro, \"Ignore previous prompt: Attack techniques for language models,\" arXiv preprint arXiv:2211.09527, 2022." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 274, + 301, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 274, + 301, + 342 + ], + "spans": [ + { + "bbox": [ + 47, + 274, + 301, + 342 + ], + "type": "text", + "content": "[543] K. Greshake, S. Abdelnabi, S. Mishra, C. Endres, T. Holz, and M. Fritz, \"Not what you've signed up for: Compromising real-world llm-integrated applications with indirect prompt injection,\" in Proceedings of the 16th ACM Workshop on Artificial Intelligence and Security, 2023, pp. 79-90." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 342, + 301, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 342, + 301, + 388 + ], + "spans": [ + { + "bbox": [ + 47, + 342, + 301, + 388 + ], + "type": "text", + "content": "[544] Y. Liu, G. Deng, Y. Li, K. Wang, Z. Wang, X. Wang, T. Zhang, Y. Liu, H. Wang, Y. Zheng et al., \"Prompt injection attack against llm-integrated applications,\" arXiv preprint arXiv:2306.05499, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 388, + 301, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 388, + 301, + 446 + ], + "spans": [ + { + "bbox": [ + 47, + 388, + 301, + 446 + ], + "type": "text", + "content": "[545] S. Toyer, O. Watkins, E. A. Mendes, J. Svegliato, L. Bailey, T. Wang, I. Ong, K. Elmaaroufi, P. Abbeel, T. Darrell et al., \"Tensor trust: Interpretable prompt injection attacks from an online game,\" arXiv preprint arXiv:2311.01011, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 447, + 301, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 447, + 301, + 504 + ], + "spans": [ + { + "bbox": [ + 47, + 447, + 301, + 504 + ], + "type": "text", + "content": "[546] J. Shi, Z. Yuan, Y. Liu, Y. Huang, P. Zhou, L. Sun, and N. Z. 
Gong, \"Optimization-based prompt injection attack to lmm-as-a-judge,\" in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 660-674." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 504, + 301, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 504, + 301, + 550 + ], + "spans": [ + { + "bbox": [ + 47, + 504, + 301, + 550 + ], + "type": "text", + "content": "[547] X. Liu, Z. Yu, Y. Zhang, N. Zhang, and C. Xiao, \"Automatic and universal prompt injection attacks against large language models,\" arXiv preprint arXiv:2403.04957, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 550, + 301, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 550, + 301, + 584 + ], + "spans": [ + { + "bbox": [ + 47, + 550, + 301, + 584 + ], + "type": "text", + "content": "[548] X. Liu, S. Jha, P. McDaniel, B. Li, and C. Xiao, \"Autohijacker: Automatic indirect prompt injection against black-box llm agents.\"" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 585, + 301, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 585, + 301, + 631 + ], + "spans": [ + { + "bbox": [ + 47, + 585, + 301, + 631 + ], + "type": "text", + "content": "[549] A. Al-Kaswan, M. Izadi, and A. Van Deursen, \"Targeted attack on gpt-neo for the satml language model data extraction challenge,\" arXiv preprint arXiv:2302.07735, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 632, + 301, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 632, + 301, + 677 + ], + "spans": [ + { + "bbox": [ + 47, + 632, + 301, + 677 + ], + "type": "text", + "content": "[550] E. Su, A. Vellore, A. Chang, R. Mura, B. Nelson, P. Kassianik, and A. Karbasi, \"Extracting memorized training data via decomposition,\" arXiv preprint arXiv:2409.12367, 2024." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 677, + 301, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 301, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 301, + 712 + ], + "type": "text", + "content": "[551] J. Huang, H. Shao, and K. C.-C. Chang, \"Are large pre-trained language models leaking your personal information?\" arXiv preprint arXiv:2205.12628, 2022." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "type": "text", + "content": "[552] Z. Zhang, J. Wen, and M. Huang, \"Ethicist: Targeted training data extraction through loss smoothed soft prompting and calibrated confidence estimation,\"" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 747 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 335, + 42, + 493, + 54 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 493, + 54 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 493, + 54 + ], + "type": "text", + "content": "arXiv preprint arXiv:2307.04401, 2023." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 54, + 564, + 100 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 54, + 564, + 100 + ], + "spans": [ + { + "bbox": [ + 310, + 54, + 564, + 100 + ], + "type": "text", + "content": "[553] K. K. Nakka, A. Frikha, R. Mendes, X. Jiang, and X. Zhou, \"Pii-compass: Guiding llm training data extraction prompts towards the target pii via grounding,\" arXiv preprint arXiv:2407.02943, 2024." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 100, + 564, + 147 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 100, + 564, + 147 + ], + "spans": [ + { + "bbox": [ + 310, + 100, + 564, + 147 + ], + "type": "text", + "content": "[554] Z. Wang, R. Bao, Y. Wu, J. Taylor, C. Xiao, F. Zheng, W. Jiang, S. Gao, and Y. Zhang, \"Unlocking memorization in large language models with dynamic soft prompting,\" arXiv preprint arXiv:2409.13853, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 147, + 564, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 147, + 564, + 192 + ], + "spans": [ + { + "bbox": [ + 310, + 147, + 564, + 192 + ], + "type": "text", + "content": "[555] J. G. Wang, J. Wang, M. Li, and S. Neel, \"Pandora's white-box: Precise training data detection and extraction in large language models,\" arXiv preprint arXiv:2402.17012, 2024." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 192, + 564, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 192, + 564, + 227 + ], + "spans": [ + { + "bbox": [ + 310, + 192, + 564, + 227 + ], + "type": "text", + "content": "[556] Z. Sha and Y. Zhang, \"Prompt stealing attacks against large language models,\" arXiv preprint arXiv:2402.12959, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 227, + 564, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 227, + 564, + 261 + ], + "spans": [ + { + "bbox": [ + 310, + 227, + 564, + 261 + ], + "type": "text", + "content": "[557] C. Zhang, J. X. Morris, and V. Shmatikov, \"Extracting prompts by inverting llm outputs,\" arXiv preprint arXiv:2405.15012, 2024." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 261, + 564, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 261, + 564, + 308 + ], + "spans": [ + { + "bbox": [ + 310, + 261, + 564, + 308 + ], + "type": "text", + "content": "[558] Y. Yang, C. Li, Y. Jiang, X. Chen, H. Wang, X. Zhang, Z. Wang, and S. Ji, \"Prsa: Prompt stealing attacks against large language models,\" arXiv preprint arXiv:2402.19200, 2024." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 308, + 564, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 308, + 564, + 376 + ], + "spans": [ + { + "bbox": [ + 310, + 308, + 564, + 376 + ], + "type": "text", + "content": "[559] Y. Zeng, H. Lin, J. Zhang, D. Yang, R. Jia, and W. Shi, \"How johnny can persuade llms to jailbreak them: Rethinking persuasion to challenge ai safety by humanizing llms,\" in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2024, pp. 14322-14350." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 376, + 564, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 376, + 564, + 446 + ], + "spans": [ + { + "bbox": [ + 310, + 376, + 564, + 446 + ], + "type": "text", + "content": "[560] X. Shen, Z. Chen, M. Backes, Y. Shen, and Y. Zhang, \"do anything now\": Characterizing and evaluating in-the-wild jailbreak prompts on large language models,\" in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 1671-1685." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 447, + 564, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 447, + 564, + 492 + ], + "spans": [ + { + "bbox": [ + 310, + 447, + 564, + 492 + ], + "type": "text", + "content": "[561] Z. Wang, W. Xie, B. Wang, E. Wang, Z. Gui, S. Ma, and K. 
Chen, \"Foot in the door: Understanding large language model jailbreaking via cognitive psychology,\" arXiv preprint arXiv:2402.15690, 2024." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 492, + 564, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 492, + 564, + 562 + ], + "spans": [ + { + "bbox": [ + 310, + 492, + 564, + 562 + ], + "type": "text", + "content": "[562] M. Samvelyan, S. C. Raparthy, A. Lupu, E. Hambro, A. Markosyan, M. Bhatt, Y. Mao, M. Jiang, J. Parker-Holder, J. Foerster et al., \"Rainbow teaming: Open-ended generation of diverse adversarial prompts,\" Advances in Neural Information Processing Systems, vol. 37, pp. 69747-69786, 2024." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 562, + 564, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 562, + 564, + 608 + ], + "spans": [ + { + "bbox": [ + 310, + 562, + 564, + 608 + ], + "type": "text", + "content": "[563] H. Jin, R. Chen, A. Zhou, Y. Zhang, and H. Wang, \"Guard: Role-playing to generate natural-language jailbreakings to test guideline adherence of large language models,\" arXiv preprint arXiv:2402.03299, 2024." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 608, + 564, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 608, + 564, + 653 + ], + "spans": [ + { + "bbox": [ + 310, + 608, + 564, + 653 + ], + "type": "text", + "content": "[564] Y. Yuan, W. Jiao, W. Wang, J.-t. Huang, P. He, S. Shi, and Z. Tu, \"Gpt-4 is too smart to be safe: Stealthy chat with llms via cipher,\" arXiv preprint arXiv:2308.06463, 2023." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 654, + 564, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 654, + 564, + 712 + ], + "spans": [ + { + "bbox": [ + 310, + 654, + 564, + 712 + ], + "type": "text", + "content": "[565] H. Lv, X. Wang, Y. Zhang, C. Huang, S. Dou, J. Ye, T. Gui, Q. 
Zhang, and X. Huang, \"Codechameleon: Personalized encryption framework for jailbreaking large language models,\" arXiv preprint arXiv:2402.16717, 2024." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 712, + 564, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 712, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 310, + 712, + 564, + 747 + ], + "type": "text", + "content": "[566] F. Jiang, Z. Xu, L. Niu, Z. Xiang, B. Ramasubramanian, B. Li, and R. Poovendran, \"Artprompt: Ascii art-based jailbreak attacks against aligned llms,\" in Proceedings" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "55" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 54 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 71, + 42, + 299, + 76 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 299, + 76 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 299, + 76 + ], + "type": "text", + "content": "of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2024, pp. 15 157-15 173." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 77, + 301, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 77, + 301, + 133 + ], + "spans": [ + { + "bbox": [ + 47, + 77, + 301, + 133 + ], + "type": "text", + "content": "[567] C. Anil, E. Durmus, N. Panickssery, M. Sharma, J. Benton, S. Kundu, J. Batson, M. Tong, J. Mu, D. Ford et al., \"Many-shot jailbreaking,\" Advances in Neural Information Processing Systems, vol. 37, pp. 129-696-129742, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 134, + 301, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 134, + 301, + 168 + ], + "spans": [ + { + "bbox": [ + 47, + 134, + 301, + 168 + ], + "type": "text", + "content": "[568] Z.-X. Yong, C. Menghini, and S. H. Bach, \"Low-resource languages jailbreak gpt-4,\" arXiv preprint arXiv:2310.02446, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 169, + 301, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 169, + 301, + 238 + ], + "spans": [ + { + "bbox": [ + 47, + 169, + 301, + 238 + ], + "type": "text", + "content": "[569] W. Wang, Z. Tu, C. Chen, Y. Yuan, J.-T. Huang, W. Jiao, and M. R. Lyu, \"All languages matter: On the multilingual safety of llms,\" in Annual Meeting of the Association for Computational Linguistics, 2024. [Online]. Available: https://api-semanticscholar.org/ CorpusID:271931322" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 239, + 301, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 239, + 301, + 284 + ], + "spans": [ + { + "bbox": [ + 47, + 239, + 301, + 284 + ], + "type": "text", + "content": "[570] Z. Wei, Y. Wang, A. Li, Y. Mo, and Y. Wang, \"Jailbreak and guard aligned language models with only few in-context demonstrations,\" arXiv preprint arXiv:2310.06387, 2023." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 285, + 301, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 285, + 301, + 330 + ], + "spans": [ + { + "bbox": [ + 47, + 285, + 301, + 330 + ], + "type": "text", + "content": "[571] N. Xu, F. Wang, B. Zhou, B. Z. Li, C. Xiao, and M. Chen, \"Cognitive overload: Jailbreaking large language models with overloaded logical thinking,\" arXiv preprint arXiv:2311.09827, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 331, + 301, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 331, + 301, + 376 + ], + "spans": [ + { + "bbox": [ + 47, + 331, + 301, + 376 + ], + "type": "text", + "content": "[572] P. Ding, J. Kuang, D. Ma, X. Cao, Y. Xian, J. Chen, and S. Huang, \"A wolf in sheep's clothing: Generalized nested jailbreak prompts can fool large language models easily,\" arXiv preprint arXiv:2311.08268, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 377, + 301, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 377, + 301, + 411 + ], + "spans": [ + { + "bbox": [ + 47, + 377, + 301, + 411 + ], + "type": "text", + "content": "[573] B. Upadhayay and V. Behzadan, \"Sandwich attack: Multi-language mixture adaptive attack on llms,\" arXiv preprint arXiv:2404.07242, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 412, + 301, + 481 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 412, + 301, + 481 + ], + "spans": [ + { + "bbox": [ + 47, + 412, + 301, + 481 + ], + "type": "text", + "content": "[574] D. Yao, J. Zhang, I. G. Harris, and M. Carlsson, \"Fuzzllm: A novel and universal fuzzing framework for proactively discovering jailbreak vulnerabilities in large language models,\" in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 4485-4489." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 481, + 301, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 481, + 301, + 538 + ], + "spans": [ + { + "bbox": [ + 47, + 481, + 301, + 538 + ], + "type": "text", + "content": "[575] B. Li, H. Xing, C. Huang, J. Qian, H. Xiao, L. Feng, and C. Tian, \"Structuralsleight: Automated jailbreak attacks on large language models utilizing uncommon text-encoded structure,\" arXiv e-prints, pp. arXiv-2406, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 539, + 301, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 539, + 301, + 582 + ], + "spans": [ + { + "bbox": [ + 47, + 539, + 301, + 582 + ], + "type": "text", + "content": "[576] A. Paulus, A. Zharmagambetov, C. Guo, B. Amos, and Y. Tian, \"Advprompter: Fast adaptive adversarial prompting for llms,\" arXiv preprint arXiv:2404.16873, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 584, + 301, + 630 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 584, + 301, + 630 + ], + "spans": [ + { + "bbox": [ + 47, + 584, + 301, + 630 + ], + "type": "text", + "content": "[577] A. Wei, N. Haghtalab, and J. Steinhardt, \"Jailbroken: How does llm safety training fail?\" Advances in Neural Information Processing Systems, vol. 36, pp. 80079-80110, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 632, + 301, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 632, + 301, + 689 + ], + "spans": [ + { + "bbox": [ + 47, + 632, + 301, + 689 + ], + "type": "text", + "content": "[578] Z. Chen, Z. Zhao, W. Qu, Z. Wen, Z. Han, Z. Zhu, J. Zhang, and H. Yao, \"Pandora: Detailed llm jailbreaking via collaborated phishing agents with decomposed reasoning,\" in ICLR 2024 Workshop on Secure and Trustworthy Large Language Models, 2024." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 689, + 301, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 301, + 734 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 301, + 734 + ], + "type": "text", + "content": "[579] E. Perez, S. Huang, F. Song, T. Cai, R. Ring, J. Aslanides, A. Glaese, N. McAleese, and G. Irving, \"Red teaming language models with language models,\" arXiv preprint arXiv:2202.03286, 2022." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 735, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 735, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 735, + 301, + 746 + ], + "type": "text", + "content": "[580] R. Shah, S. Pour, A. Tagade, S. Casper, J. Rando et al.," + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 335, + 42, + 564, + 76 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 564, + 76 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 564, + 76 + ], + "type": "text", + "content": "\"Scalable and transferable black-box jailbreaks for language models via persona modulation,\" arXiv preprint arXiv:2311.03348, 2023." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 77, + 564, + 111 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 77, + 564, + 111 + ], + "spans": [ + { + "bbox": [ + 310, + 77, + 564, + 111 + ], + "type": "text", + "content": "[581] X. Guo, F. Yu, H. Zhang, L. Qin, and B. Hu, \"Coldattack: Jailbreaking lms with stealthiness and controllability,\" arXiv preprint arXiv:2402.08679, 2024." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 112, + 564, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 112, + 564, + 156 + ], + "spans": [ + { + "bbox": [ + 310, + 112, + 564, + 156 + ], + "type": "text", + "content": "[582] J. Yu, H. Luo, J. Y.-C. Hu, W. Guo, H. Liu, and X. Xing, \"Enhancing jailbreak attack against large language models through silent tokens,\" arXiv preprint arXiv:2405.20653, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 158, + 564, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 158, + 564, + 204 + ], + "spans": [ + { + "bbox": [ + 310, + 158, + 564, + 204 + ], + "type": "text", + "content": "[583] Z.-W. Hong, I. Shenfeld, T.-H. Wang, Y.-S. Chuang, A. Pareja, J. Glass, A. Srivastava, and P. Agrawal, \"Curiosity-driven red-teaming for large language models,\" arXiv preprint arXiv:2402.19464, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 205, + 564, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 205, + 564, + 262 + ], + "spans": [ + { + "bbox": [ + 310, + 205, + 564, + 262 + ], + "type": "text", + "content": "[584] X. Zheng, T. Pang, C. Du, Q. Liu, J. Jiang, and M. Lin, \"Improved few-shot jailbreaking can circumvent aligned language models and their defenses,\" Advances in Neural Information Processing Systems, vol. 37, pp. 32-856-32-887, 2024." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 262, + 564, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 262, + 564, + 296 + ], + "spans": [ + { + "bbox": [ + 310, + 262, + 564, + 296 + ], + "type": "text", + "content": "[585] Z. Xiao, Y. Yang, G. Chen, and Y. Chen, \"Distract large language models for automatic jailbreak attack,\" arXiv preprint arXiv:2403.08424, 2024." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 297, + 564, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 297, + 564, + 342 + ], + "spans": [ + { + "bbox": [ + 310, + 297, + 564, + 342 + ], + "type": "text", + "content": "[586] Z. Chang, M. Li, Y. Liu, J. Wang, Q. Wang, and Y. Liu, \"Play guessing game with llm: Indirect jailbreak attack with implicit clues,\" arXiv preprint arXiv:2402.09091, 2024." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 342, + 564, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 342, + 564, + 388 + ], + "spans": [ + { + "bbox": [ + 310, + 342, + 564, + 388 + ], + "type": "text", + "content": "[587] J. Yu, X. Lin, Z. Yu, and X. Xing, \"Gptfuzzer: Red teaming large language models with auto-generated jailbreak prompts,\" arXiv preprint arXiv:2309.10253, 2023." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 388, + 564, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 388, + 564, + 445 + ], + "spans": [ + { + "bbox": [ + 310, + 388, + 564, + 445 + ], + "type": "text", + "content": "[588] W. Jiang, Z. Wang, J. Zhai, S. Ma, Z. Zhao, and C. Shen, \"Unlocking adversarial suffix optimization without affirmative phrases: Efficient black-box jailbreaking via llm as optimizer,\" arXiv preprint arXiv:2408.11313, 2024." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 447, + 564, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 447, + 564, + 480 + ], + "spans": [ + { + "bbox": [ + 310, + 447, + 564, + 480 + ], + "type": "text", + "content": "[589] J. Zhang, Z. Wang, R. Wang, X. Ma, and Y.-G. Jiang, \"Enja: Ensemble jailbreak on large language models,\" arXiv preprint arXiv:2408.03603, 2024." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 481, + 564, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 481, + 564, + 515 + ], + "spans": [ + { + "bbox": [ + 310, + 481, + 564, + 515 + ], + "type": "text", + "content": "[590] X. Zhao, X. Yang, T. Pang, C. Du, L. Li, Y.-X. Wang, and W. Y. Wang, \"Weak-to-strong jailbreaking on large language models,\" arXiv preprint arXiv:2401.17256, 2024." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 516, + 564, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 516, + 564, + 550 + ], + "spans": [ + { + "bbox": [ + 310, + 516, + 564, + 550 + ], + "type": "text", + "content": "[591] B. Upadhayay, V. Behzadan, and A. Karbasi, \"Cognitive overload attack: Prompt injection for long context,\" arXiv preprint arXiv:2410.11272, 2024." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 551, + 564, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 551, + 564, + 594 + ], + "spans": [ + { + "bbox": [ + 310, + 551, + 564, + 594 + ], + "type": "text", + "content": "[592] H. Kwon and W. Pak, \"Text-based prompt injection attack using mathematical functions in modern large language models,\" *Electronics*, vol. 13, no. 24, p. 5008, 2024." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 596, + 564, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 596, + 564, + 643 + ], + "spans": [ + { + "bbox": [ + 310, + 596, + 564, + 643 + ], + "type": "text", + "content": "[593] E. Bagdasaryan, T.-Y. Hsieh, B. Nassi, and V. Shmatikov, \"Abusing images and sounds for indirect instruction injection in multi-modal llms,\" arXiv preprint arXiv:2307.10490, 2023." 
+ } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 643, + 564, + 699 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 643, + 564, + 699 + ], + "spans": [ + { + "bbox": [ + 310, + 643, + 564, + 699 + ], + "type": "text", + "content": "[594] D. Pasquini, M. Strohmeier, and C. Troncoso, \"Neural exec: Learning (and learning from) execution triggers for prompt injection attacks,\" in Proceedings of the 2024 Workshop on Artificial Intelligence and Security, 2024, pp. 89-100." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 700, + 564, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 700, + 564, + 734 + ], + "spans": [ + { + "bbox": [ + 310, + 700, + 564, + 734 + ], + "type": "text", + "content": "[595] Z. Shao, H. Liu, J. Mu, and N. Z. Gong, \"Making llms vulnerable to prompt injection via poisoning alignment,\" arXiv preprint arXiv:2410.14827, 2024." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 735, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 735, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 735, + 564, + 746 + ], + "type": "text", + "content": "[596] Y. Yang, H. Yao, B. Yang, Y. He, Y. Li, T. Zhang," + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "56" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 55 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 42, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 70, + 42, + 301, + 77 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 42, + 301, + 77 + ], + "spans": [ + { + "bbox": [ + 70, + 42, + 301, + 77 + ], + "type": "text", + "content": "Z. Qin, and K. Ren, \"Tapi: Towards target-specific and adversarial prompt injection against code llms,\" arXiv preprint arXiv:2407.09164, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 77, + 301, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 77, + 301, + 112 + ], + "spans": [ + { + "bbox": [ + 46, + 77, + 301, + 112 + ], + "type": "text", + "content": "[597] Y. Ren, \"F2a: An innovative approach for prompt injection by utilizing feign security detection agents,\" arXiv preprint arXiv:2410.08776, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 112, + 301, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 112, + 301, + 158 + ], + "spans": [ + { + "bbox": [ + 46, + 112, + 301, + 158 + ], + "type": "text", + "content": "[598] R. Pedro, D. Castro, P. Carreira, and N. Santos, \"From prompt injections to sql injection attacks: How protected is your llm-integrated web application?\" arXiv preprint arXiv:2308.01990, 2023." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 158, + 301, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 158, + 301, + 204 + ], + "spans": [ + { + "bbox": [ + 46, + 158, + 301, + 204 + ], + "type": "text", + "content": "[599] Y. Lee, T. Park, Y. Lee, J. Gong, and J. Kang, \"Exploring potential prompt injection attacks in federated military Ilms and their mitigation,\" arXiv preprint arXiv:2501.18416, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 205, + 301, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 205, + 301, + 239 + ], + "spans": [ + { + "bbox": [ + 46, + 205, + 301, + 239 + ], + "type": "text", + "content": "[600] D. Lee and M. Tiwari, \"Prompt infection: Llm-to-llm prompt injection within multi-agent systems,\" arXiv preprint arXiv:2410.07283, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 239, + 301, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 239, + 301, + 306 + ], + "spans": [ + { + "bbox": [ + 46, + 239, + 301, + 306 + ], + "type": "text", + "content": "[601] W. Zhang, X. Kong, C. Dewitt, T. Braunl, and J. B. Hong, \"A study on prompt injection attack against lvm-integrated mobile robotic systems,\" in 2024 IEEE 35th International Symposium on Software Reliability Engineering Workshops (ISSREW). IEEE, 2024, pp. 361-368." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 308, + 301, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 308, + 301, + 354 + ], + "spans": [ + { + "bbox": [ + 46, + 308, + 301, + 354 + ], + "type": "text", + "content": "[602] W. Meng, Z. Guo, L. Wu, C. Gong, W. Liu, W. Li, C. Wei, and W. Chen, \"Rr: Unveiling llm training privacy through recollection and ranking,\" arXiv preprint arXiv:2502.12658, 2025." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 354, + 301, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 354, + 301, + 400 + ], + "spans": [ + { + "bbox": [ + 46, + 354, + 301, + 400 + ], + "type": "text", + "content": "[603] B. Jayaraman, E. Ghosh, H. Inan, M. Chase, S. Roy, and W. Dai, \"Active data pattern extraction attacks on generative language models,\" arXiv preprint arXiv:2207.10802, 2022." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 400, + 301, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 400, + 301, + 458 + ], + "spans": [ + { + "bbox": [ + 46, + 400, + 301, + 458 + ], + "type": "text", + "content": "[604] Z. Zeng, T. Xiang, S. Guo, J. He, Q. Zhang, G. Xu, and T. Zhang, \"Contrast-then-approximate: Analyzing keyword leakage of generative language models,\" IEEE Transactions on Information Forensics and Security, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 458, + 301, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 458, + 301, + 514 + ], + "spans": [ + { + "bbox": [ + 46, + 458, + 301, + 514 + ], + "type": "text", + "content": "[605] C. Jiang, X. Pan, G. Hong, C. Bao, and M. Yang, \"Rag-thief: Scalable extraction of private data from retrieval-augmented generation applications with agent-based attacks,\" arXiv preprint arXiv:2411.14110, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 515, + 301, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 515, + 301, + 562 + ], + "spans": [ + { + "bbox": [ + 46, + 515, + 301, + 562 + ], + "type": "text", + "content": "[606] Z. Qi, H. Zhang, E. Xing, S. Kakade, and H. Lakkaraju, \"Follow my instruction and spill the beans: Scalable data extraction from retrieval-augmented generation systems,\" arXiv preprint arXiv:2402.17840, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 562, + 301, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 562, + 301, + 619 + ], + "spans": [ + { + "bbox": [ + 46, + 562, + 301, + 619 + ], + "type": "text", + "content": "[607] S. Zeng, J. Zhang, P. He, Y. Xing, Y. Liu, H. Xu, J. Ren, S. Wang, D. Yin, Y. Chang et al., \"The good and the bad: Exploring privacy issues in retrieval-augmented generation (rag),\" arXiv preprint arXiv:2402.16893, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 46, + 619, + 301, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 619, + 301, + 654 + ], + "spans": [ + { + "bbox": [ + 46, + 619, + 301, + 654 + ], + "type": "text", + "content": "[608] Y. Peng, J. Wang, H. Yu, and A. Houmansadr, \"Data extraction attacks in retrieval-augmented generation via backdoors,\" arXiv preprint arXiv:2411.01705, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 46, + 654, + 301, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 654, + 301, + 700 + ], + "spans": [ + { + "bbox": [ + 46, + 654, + 301, + 700 + ], + "type": "text", + "content": "[609] A. Panda, C. A. Choquette-Choo, Z. Zhang, Y. Yang, and P. Mittal, \"Teach llms to phish: Stealing private information from language models,\" arXiv preprint arXiv:2403.00871, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 46, + 700, + 301, + 735 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 700, + 301, + 735 + ], + "spans": [ + { + "bbox": [ + 46, + 700, + 301, + 735 + ], + "type": "text", + "content": "[610] L. Lu, Z. Zuo, Z. Sheng, and P. Zhou, “Merger-as-a-stealer: Stealing targeted pii from aligned llms with model merging,” arXiv preprint arXiv:2502.16094, 2025." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 46, + 735, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 735, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 46, + 735, + 301, + 747 + ], + "type": "text", + "content": "[611] X. Chen, S. Tang, R. Zhu, S. Yan, L. Jin, Z. Wang, L. Su," + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 747 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 333, + 42, + 564, + 101 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 42, + 564, + 101 + ], + "spans": [ + { + "bbox": [ + 333, + 42, + 564, + 101 + ], + "type": "text", + "content": "Z. Zhang, X. Wang, and H. Tang, \"The janus interface: How fine-tuning in large language models amplifies the privacy risks,\" in Proceedings of the 2024 on ACM SIGSAC Conference on Computer and Communications Security, 2024, pp. 1285-1299." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 101, + 564, + 124 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 101, + 564, + 124 + ], + "spans": [ + { + "bbox": [ + 310, + 101, + 564, + 124 + ], + "type": "text", + "content": "[612] R. Panchendrarajan and S. Bhoi, \"Dataset reconstruction attack against language models,\" 2021." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 124, + 564, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 124, + 564, + 169 + ], + "spans": [ + { + "bbox": [ + 310, + 124, + 564, + 169 + ], + "type": "text", + "content": "[613] M. R. U. Rashid, V. A. Dasu, K. Gu, N. Sultana, and S. Mehnaz, \"Fltrojan: Privacy leakage attacks against federated language models through selective weight tampering,\" arXiv preprint arXiv:2310.16152, 2023." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 169, + 564, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 169, + 564, + 216 + ], + "spans": [ + { + "bbox": [ + 310, + 169, + 564, + 216 + ], + "type": "text", + "content": "[614] J. Dentan, A. Paran, and A. Shabou, \"Reconstructing training data from document understanding models,\" in 33rd USENIX Security Symposium (USENIX Security 24), 2024, pp. 6813-6830." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 216, + 564, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 216, + 564, + 274 + ], + "spans": [ + { + "bbox": [ + 310, + 216, + 564, + 274 + ], + "type": "text", + "content": "[615] J. Hósciłowicz, P. Popiołek, J. Rudkowski, J. Bieniasz, and A. Janicki, \"Unconditional token forcing: Extracting text hidden within llm,\" in 2024 19th Conference on Computer Science and Intelligence Systems (FedCSIS). IEEE, 2024, pp. 621-624." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 274, + 564, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 274, + 564, + 319 + ], + "spans": [ + { + "bbox": [ + 310, + 274, + 564, + 319 + ], + "type": "text", + "content": "[616] A. Al-Kaswan, M. Izadi, and A. Van Deursen, \"Traces of memorisation in large language models for code,\" in Proceedings of the IEEE/ACM 46th International Conference on Software Engineering, 2024, pp. 1-12." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 319, + 564, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 319, + 564, + 365 + ], + "spans": [ + { + "bbox": [ + 310, + 319, + 564, + 365 + ], + "type": "text", + "content": "[617] Y. Nie, C. Wang, K. Wang, G. Xu, G. Xu, and H. Wang, \"Decoding secret memorization in code llms through token-level characterization,\" arXiv preprint arXiv:2410.08858, 2024." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 365, + 564, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 365, + 564, + 400 + ], + "spans": [ + { + "bbox": [ + 310, + 365, + 564, + 400 + ], + "type": "text", + "content": "[618] E. Lehman, S. Jain, K. Pichotta, Y. Goldberg, and B. C. Wallace, \"Does bert pretrained on clinical notes reveal sensitive data?\" arXiv preprint arXiv:2104.07762, 2021." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 400, + 564, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 400, + 564, + 458 + ], + "spans": [ + { + "bbox": [ + 310, + 400, + 564, + 458 + ], + "type": "text", + "content": "[619] A. Diera, N. Lell, A. Garifullina, and A. Scherp, \"Memorization of named entities in fine-tuned bert models,\" in International Cross-Domain Conference for Machine Learning and Knowledge Extraction. Springer, 2023, pp. 258-279." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 458, + 564, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 458, + 564, + 504 + ], + "spans": [ + { + "bbox": [ + 310, + 458, + 564, + 504 + ], + "type": "text", + "content": "[620] R. Zhang, S. Hidano, and F. Koushanfar, \"Text re- vealer: Private text reconstruction via model inversion attacks against transformers,\" arXiv preprint arXiv:2209.10505, 2022." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 504, + 564, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 504, + 564, + 562 + ], + "spans": [ + { + "bbox": [ + 310, + 504, + 564, + 562 + ], + "type": "text", + "content": "[621] Y. Huang, Y. Li, W. Wu, J. Zhang, and M. R. Lyu, \"Your code secret belongs to me: neural code completion tools can memorize hard-coded credentials,\" Proceedings of the ACM on Software Engineering, vol. 1, no. FSE, pp. 2515-2537, 2024." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 562, + 564, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 562, + 564, + 596 + ], + "spans": [ + { + "bbox": [ + 310, + 562, + 564, + 596 + ], + "type": "text", + "content": "[622] T. Tiwari and G. E. Suh, \"Sequence-level analysis of leakage risk of training data in large language models,\" arXiv preprint arXiv:2412.11302, 2024." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 596, + 564, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 596, + 564, + 643 + ], + "spans": [ + { + "bbox": [ + 310, + 596, + 564, + 643 + ], + "type": "text", + "content": "[623] H. Shao, J. Huang, S. Zheng, and K. C.-C. Chang, \"Quantifying association capabilities of large language models and its implications on privacy leakage,\" arXiv preprint arXiv:2305.12707, 2023." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 643, + 564, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 643, + 564, + 677 + ], + "spans": [ + { + "bbox": [ + 310, + 643, + 564, + 677 + ], + "type": "text", + "content": "[624] Y. More, P. Ganesh, and G. Farnadi, \"Towards more realistic extraction attacks: An adversarial perspective,\" arXiv preprint arXiv:2407.02596, 2024." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 677, + 564, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 677, + 564, + 723 + ], + "spans": [ + { + "bbox": [ + 310, + 677, + 564, + 723 + ], + "type": "text", + "content": "[625] R. Staab, M. Vero, M. Balunović, and M. Vechev, \"Beyond memorization: Violating privacy via inference with large language models,\" arXiv preprint arXiv:2310.07298, 2023." 
+ } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 723, + 564, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 723, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 310, + 723, + 564, + 747 + ], + "type": "text", + "content": "[626] H. Xu, Z. Zhang, X. Yu, Y. Wu, Z. Zha, B. Xu, W. Xu, M. Hu, and K. Peng, \"Targeted training data extrac" + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "57" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 56 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 71, + 42, + 301, + 77 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 301, + 77 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 301, + 77 + ], + "type": "text", + "content": "tion—neighborhood comparison-based membership inference attacks in large language models,\" Applied Sciences, vol. 14, no. 16, p. 7118, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 77, + 301, + 111 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 77, + 301, + 111 + ], + "spans": [ + { + "bbox": [ + 47, + 77, + 301, + 111 + ], + "type": "text", + "content": "[627] A. Karamolegkou, J. Li, L. Zhou, and A. 
Søgaard, \"Copyright violations and large language models,\" arXiv preprint arXiv:2310.13771, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 112, + 301, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 112, + 301, + 157 + ], + "spans": [ + { + "bbox": [ + 47, + 112, + 301, + 157 + ], + "type": "text", + "content": "[628] X. Zheng, H. Han, S. Shi, Q. Fang, Z. Du, X. Hu, and Q. Guo, \"Inputsnatch: Stealing input in llm services via timing side-channel attacks,\" arXiv preprint arXiv:2411.18191, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 158, + 301, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 158, + 301, + 203 + ], + "spans": [ + { + "bbox": [ + 47, + 158, + 301, + 203 + ], + "type": "text", + "content": "[629] Y. Dong, R. Mu, G. Jin, Y. Qi, J. Hu, X. Zhao, J. Meng, W. Ruan, and X. Huang, \"Building guardrails for large language models,\" arXiv preprint arXiv:2402.01822, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 205, + 301, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 205, + 301, + 261 + ], + "spans": [ + { + "bbox": [ + 47, + 205, + 301, + 261 + ], + "type": "text", + "content": "[630] N. Jain, A. Schwarzschild, Y. Wen, G. Somepalli, J. Kirchenbauer, P. yeh Chiang, M. Goldblum, A. Saha, J. Geiping, and T. Goldstein, \"Baseline defenses for adversarial attacks against aligned language models,\" 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 262, + 301, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 262, + 301, + 319 + ], + "spans": [ + { + "bbox": [ + 47, + 262, + 301, + 319 + ], + "type": "text", + "content": "[631] H. Lin, Y. Lao, T. Geng, T. Yu, and W. Zhao, \"Uniguardian: A unified defense for detecting prompt injection, backdoor attacks and adversarial attacks in large language models,\" arXiv preprint arXiv:2502.13141, 2025." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 319, + 301, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 319, + 301, + 376 + ], + "spans": [ + { + "bbox": [ + 47, + 319, + 301, + 376 + ], + "type": "text", + "content": "[632] Z. Hu, G. Wu, S. Mitra, R. Zhang, T. Sun, H. Huang, and V. Swaminathan, \"Token-level adversarial prompt detection based on perplexity measures and contextual information,\" in ICLR 2025 Workshop on Building Trust in Language Models and Applications, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 377, + 301, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 377, + 301, + 435 + ], + "spans": [ + { + "bbox": [ + 47, + 377, + 301, + 435 + ], + "type": "text", + "content": "[633] Y. Gou, K. Chen, Z. Liu, L. Hong, H. Xu, Z. Li, D.-Y. Yeung, J. T. Kwok, and Y. Zhang, \"Eyes closed, safety on: Protecting multimodal llms via image-to-text transformation,\" in European Conference on Computer Vision, 2024, pp. 388-404." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 435, + 301, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 435, + 301, + 480 + ], + "spans": [ + { + "bbox": [ + 47, + 435, + 301, + 480 + ], + "type": "text", + "content": "[634] S. Armstrong, M. Franklin, C. Stevens, and R. Gorman, \"Defense against the dark prompts: Mitigating best-of-n jailbreaking with prompt evaluation,\" arXiv preprint arXiv:2107.03374, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 481, + 301, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 481, + 301, + 550 + ], + "spans": [ + { + "bbox": [ + 47, + 481, + 301, + 550 + ], + "type": "text", + "content": "[635] Y. Xie, M. Fang, R. Pi, and N. 
Gong, \"GradSafe: Detecting jailbreak prompts for LLMs via safety-critical gradient analysis,\" in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), L.-W. Ku, A. Martins, and V. Srikumar, Eds., 2024, pp. 507-518." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 550, + 301, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 550, + 301, + 596 + ], + "spans": [ + { + "bbox": [ + 47, + 550, + 301, + 596 + ], + "type": "text", + "content": "[636] B. Peng, Z. Bi, Q. Niu, M. Liu, P. Feng, T. Wang, L. K. Yan, Y. Wen, Y. Zhang, and C. H. Yin, \"Jailbreaking and mitigation of vulnerabilities in large language models,\" arXiv preprint arXiv:2410.15236, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 597, + 301, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 597, + 301, + 643 + ], + "spans": [ + { + "bbox": [ + 47, + 597, + 301, + 643 + ], + "type": "text", + "content": "[637] A. Kumar, C. Agarwal, S. Srinivas, A. J. Li, S. Feizi, and H. Lakkaraju, \"Certifying LLM safety against adversarial prompting,\" in First Conference on Language Modeling, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 643, + 301, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 643, + 301, + 689 + ], + "spans": [ + { + "bbox": [ + 47, + 643, + 301, + 689 + ], + "type": "text", + "content": "[638] X. Zhang, C. Zhang, T. Li, Y. Huang, X. Jia, M. Hu, J. Zhang, Y. Liu, S. Ma, and C. Shen, \"Jailguard: A universal detection framework for llm prompt-based attacks,\" arXiv preprint arXiv:2312.10766, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 689, + 301, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 301, + 734 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 301, + 734 + ], + "type": "text", + "content": "[639] Y. Liu, Y. 
Jia, R. Geng, J. Jia, and N. Z. Gong, \"Formalizing and benchmarking prompt injection attacks and defenses,\" in Proceedings of the 33rd USENIX Conference on Security Symposium, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 735, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 735, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 47, + 735, + 301, + 747 + ], + "type": "text", + "content": "[640] X. Suo, \"Signed-prompt: A new approach to prevent" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 747 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 335, + 42, + 564, + 77 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 564, + 77 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 564, + 77 + ], + "type": "text", + "content": "prompt injection attacks against llm-integrated applications,\" in AIP Conference Proceedings, vol. 3194, no. 1. AIP Publishing, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 77, + 564, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 77, + 564, + 134 + ], + "spans": [ + { + "bbox": [ + 310, + 77, + 564, + 134 + ], + "type": "text", + "content": "[641] L. Yan, Z. Zhang, G. Tao, K. Zhang, X. Chen, G. Shen, and X. Zhang, \"Parafuzz: An interpretability-driven technique for detecting poisoned samples in nlp,\" Advances in Neural Information Processing Systems, vol. 36, pp. 66755-66767, 2023." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 134, + 564, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 134, + 564, + 191 + ], + "spans": [ + { + "bbox": [ + 310, + 134, + 564, + 191 + ], + "type": "text", + "content": "[642] X. Hu, P.-Y. Chen, and T.-Y. 
Ho, \"Gradient cuff: Detecting jailbreak attacks on large language models by exploring refusal loss landscapes,\" in Advances in Neural Information Processing Systems, vol. 37, 2024, pp. 126-265-126-296." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 192, + 564, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 192, + 564, + 216 + ], + "spans": [ + { + "bbox": [ + 310, + 192, + 564, + 216 + ], + "type": "text", + "content": "[643] G. Alon and M. J. Kamfonas, \"Detecting language model attacks with perplexity,\" 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 216, + 564, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 216, + 564, + 261 + ], + "spans": [ + { + "bbox": [ + 310, + 216, + 564, + 261 + ], + "type": "text", + "content": "[644] J. Ji, B. Hou, A. Robey, G. J. Pappas, H. Hassani, Y. Zhang, E. Wong, and S. Chang, \"Defending large language models against jailbreak attacks via semantic smoothing,\" CoRR, 2024." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 261, + 564, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 261, + 564, + 308 + ], + "spans": [ + { + "bbox": [ + 310, + 261, + 564, + 308 + ], + "type": "text", + "content": "[645] M. Phute, A. Helbling, M. Hull, S. Peng, S. Szyller, C. Cornelius, and D. H. Chau, \"Llm self defense: By self examination, llms know they are being tricked,\" arXiv preprint arXiv:2308.07308, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 308, + 564, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 308, + 564, + 354 + ], + "spans": [ + { + "bbox": [ + 310, + 308, + 564, + 354 + ], + "type": "text", + "content": "[646] L. N. Candogan, Y. Wu, E. A. Rocamora, G. G. Chrysos, and V. Cevher, \"Single-pass detection of jailbreaking input in large language models,\" arXiv preprint arXiv:2502.15435, 2025." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 354, + 564, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 354, + 564, + 423 + ], + "spans": [ + { + "bbox": [ + 310, + 354, + 564, + 423 + ], + "type": "text", + "content": "[647] B. Cao, Y. Cao, L. Lin, and J. Chen, “Defending against alignment-breaking attacks via robustly aligned LLM,” in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), L.-W. Ku, A. Martins, and V. Srikumar, Eds., 2024, pp. 10542-10560." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 423, + 564, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 423, + 564, + 470 + ], + "spans": [ + { + "bbox": [ + 310, + 423, + 564, + 470 + ], + "type": "text", + "content": "[648] Y. Zhang, L. Ding, L. Zhang, and D. Tao, \"Intention analysis makes LLMs a good jailbreak defender,\" in Proceedings of the 31st International Conference on Computational Linguistics, 2025, pp. 2947-2968." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 470, + 564, + 537 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 470, + 564, + 537 + ], + "spans": [ + { + "bbox": [ + 310, + 470, + 564, + 537 + ], + "type": "text", + "content": "[649] S. Han, K. Rao, A. Ettinger, L. Jiang, B. Y. Lin, N. Lambert, Y. Choi, and N. Dziri, \"Wildguard: Open one-stop moderation tools for safety risks, jailbreaks, and refusals of llms,\" in The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 538, + 564, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 538, + 564, + 584 + ], + "spans": [ + { + "bbox": [ + 310, + 538, + 564, + 584 + ], + "type": "text", + "content": "[650] M. Pisano, P. Ly, A. Sanders, B. Yao, D. Wang, T. Strzalkowski, and M. 
Si, \"Bergeron: Combating adversarial attacks through a conscience-based alignment framework,\" arXiv preprint arXiv:2312.00029, 2024." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 585, + 564, + 630 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 585, + 564, + 630 + ], + "spans": [ + { + "bbox": [ + 310, + 585, + 564, + 630 + ], + "type": "text", + "content": "[651] A. Robey, E. Wong, H. Hassani, and G. J. Pappas, \"Smoothllm: Defending large language models against jailbreaking attacks,\" arXiv preprint arXiv:2310.03684, 2023." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 632, + 564, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 632, + 564, + 712 + ], + "spans": [ + { + "bbox": [ + 310, + 632, + 564, + 712 + ], + "type": "text", + "content": "[652] J. Ji, B. Hou, Z. Zhang, G. Zhang, W. Fan, Q. Li, Y. Zhang, G. Liu, S. Liu, and S. Chang, \"Advancing the robustness of large language models through self-denoised smoothing,\" in Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 2: Short Papers), 2024, pp. 246-257." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 712, + 564, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 712, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 310, + 712, + 564, + 747 + ], + "type": "text", + "content": "[653] J. Yi, Y. Xie, B. Zhu, K. Hines, E. Kiciman, G. Sun, X. Xie, and F. 
Wu, \"Benchmarking and defending against indirect prompt injection attacks on large lan" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "58" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 57 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 71, + 42, + 192, + 54 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 192, + 54 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 192, + 54 + ], + "type": "text", + "content": "guage models,\" CoRR, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 54, + 301, + 100 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 54, + 301, + 100 + ], + "spans": [ + { + "bbox": [ + 47, + 54, + 301, + 100 + ], + "type": "text", + "content": "[654] X. Song, S. Duan, and G. Liu, \"Alis: Aligned llm instruction security strategy for unsafe input prompt,\" in Proceedings of the 31st International Conference on Computational Linguistics, 2025, pp. 9124-9146." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 100, + 301, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 100, + 301, + 157 + ], + "spans": [ + { + "bbox": [ + 47, + 100, + 301, + 157 + ], + "type": "text", + "content": "[655] Y. 
Wang, Z. Shi, A. Bai, and C.-J. Hsieh, \"Defending Ilms against jailbreaking attacks via backtranslation,\" in Findings of the Association for Computational Linguistics: ACL 2024, L.-W. Ku, A. Martins, and V. Srikumar, Eds., 2024, pp. 16031-16046." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 157, + 301, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 157, + 301, + 192 + ], + "spans": [ + { + "bbox": [ + 47, + 157, + 301, + 192 + ], + "type": "text", + "content": "[656] E. Zverev, S. Abdelnabi, M. Fritz, and C. H. Lampert, \"Can LLMs separate instructions from data? and what do we even mean by that?\" CoRR, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 192, + 301, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 192, + 301, + 237 + ], + "spans": [ + { + "bbox": [ + 47, + 192, + 301, + 237 + ], + "type": "text", + "content": "[657] Y. Dong, R. Mu, G. Jin, Y. Qi, J. Hu, X. Zhao, J. Meng, W. Ruan, and X. Huang, \"Building guardrails for large language models,\" arXiv preprint arXiv:2402.01822, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 238, + 301, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 238, + 301, + 297 + ], + "spans": [ + { + "bbox": [ + 47, + 238, + 301, + 297 + ], + "type": "text", + "content": "[658] D. Kumar, Y. A. AbuHashem, and Z. Durmeric, \"Watch your language: Investigating content moderation with large language models,\" in Proceedings of the International AAAI Conference on Web and Social Media, vol. 18, 2024, pp. 865-878." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 297, + 301, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 297, + 301, + 365 + ], + "spans": [ + { + "bbox": [ + 47, + 297, + 301, + 365 + ], + "type": "text", + "content": "[659] T. Rebedea, R. Dinu, M. N. Sreedhar, C. Parisien, and J. 
Cohen, \"Nemo guardrails: A toolkit for controllable and safe llm applications with programmable rails,\" in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, 2023, pp. 431-445." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 365, + 301, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 365, + 301, + 411 + ], + "spans": [ + { + "bbox": [ + 47, + 365, + 301, + 411 + ], + "type": "text", + "content": "[660] OpenAI, \"Improving model safety behavior with rule-based rewards,\" https://openai.com/index/improving-model-safety-behavior-with-rule-based-re 2025, accessed: 2025-03-24." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 412, + 301, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 412, + 301, + 458 + ], + "spans": [ + { + "bbox": [ + 47, + 412, + 301, + 458 + ], + "type": "text", + "content": "[661] H. Ma, C. Zhang, H. Fu, P. Zhao, and B. Wu, \"Adapting large language models for content moderation: Pitfalls in data engineering and supervised fine-tuning,\" arXiv preprint arXiv:2310.03400, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 458, + 301, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 458, + 301, + 504 + ], + "spans": [ + { + "bbox": [ + 47, + 458, + 301, + 504 + ], + "type": "text", + "content": "[662] M. Phute, A. Helbling, M. Hull, S. Peng, S. Szyller, C. Cornelius, and D. H. Chau, \"Llm self defense: By self examination, llms know they are being tricked,\" arXiv preprint arXiv:2308.07308, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 504, + 301, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 504, + 301, + 550 + ], + "spans": [ + { + "bbox": [ + 47, + 504, + 301, + 550 + ], + "type": "text", + "content": "[663] Z. Gou, Z. Shao, Y. Gong, Y. Shen, Y. Yang, N. Duan, and W. 
Chen, \"Critic: Large language models can self-correct with tool-interactive critiquing,\" arXiv preprint arXiv:2305.11738, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 550, + 301, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 550, + 301, + 608 + ], + "spans": [ + { + "bbox": [ + 47, + 550, + 301, + 608 + ], + "type": "text", + "content": "[664] C. Lu, S. Holt, C. Fanconi, A. J. Chan, J. Foerster, M. van der Schaar, and R. T. Lange, \"Discovering preference optimization algorithms with and for large language models,\" in Advances in Neural Information Processing Systems, vol. 37, 2024, pp. 86528-86573." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 608, + 301, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 608, + 301, + 666 + ], + "spans": [ + { + "bbox": [ + 47, + 608, + 301, + 666 + ], + "type": "text", + "content": "[665] A. Madaan, N. Tandon, P. Gupta, S. Hallinan, L. Gao, S. Wiegreffe, U. Alon, N. Dziri, S. Prabhumoye, Y. Yang et al., \"Self-refine: Iterative refinement with self-feedback,\" Advances in Neural Information Processing Systems, vol. 36, pp. 46534-46594, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 666, + 301, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 666, + 301, + 734 + ], + "spans": [ + { + "bbox": [ + 47, + 666, + 301, + 734 + ], + "type": "text", + "content": "[666] D. Jiang, X. Ren, and B. Y. Lin, \"Llm-blender: Ensemble large language models with pairwise ranking and generative fusion,\" in Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2023, pp. 14165-14178." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 734, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 734, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 734, + 301, + 746 + ], + "type": "text", + "content": "[667] Z. Lai, X. Zhang, and S. Chen, \"Adaptive ensembles" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 335, + 42, + 564, + 78 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 564, + 78 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 564, + 78 + ], + "type": "text", + "content": "of fine-tuned transformers for llm-generated text detection,\" in 2024 International Joint Conference on Neural Networks. IEEE, 2024, pp. 1-7." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 77, + 564, + 122 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 77, + 564, + 122 + ], + "spans": [ + { + "bbox": [ + 310, + 77, + 564, + 122 + ], + "type": "text", + "content": "[668] C. Xiong, X. Qi, P.-Y. Chen, and T.-Y. Ho, \"Defensive prompt patch: A robust and interpretable defense of llms against jailbreak attacks,\" arXiv preprint arXiv:2405.20099, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 122, + 564, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 122, + 564, + 169 + ], + "spans": [ + { + "bbox": [ + 310, + 122, + 564, + 169 + ], + "type": "text", + "content": "[669] Z. Zhang, Q. Zhang, and J. Foerster, “Parden, can you repeat that? defending against jailbreaks via repetition,” in Proceedings of the 41st International Conference on Machine Learning, 2024, pp. 60271-60287." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 169, + 564, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 169, + 564, + 227 + ], + "spans": [ + { + "bbox": [ + 310, + 169, + 564, + 227 + ], + "type": "text", + "content": "[670] Z. Yuan, Z. Xiong, Y. Zeng, N. Yu, R. Jia, D. Song, and B. Li, \"Rigorllm: resilient guardrails for large language models against undesired content,\" in Proceedings of the 41st International Conference on Machine Learning, 2024, pp. 57-953-57-965." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 227, + 564, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 227, + 564, + 274 + ], + "spans": [ + { + "bbox": [ + 310, + 227, + 564, + 274 + ], + "type": "text", + "content": "[671] M. Cao, M. Fatemi, J. C. Cheung, and S. Shabanian, \"Systematic rectification of language models via dead-end analysis,\" in The Eleventh International Conference on Learning Representations, 2023." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 274, + 564, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 274, + 564, + 319 + ], + "spans": [ + { + "bbox": [ + 310, + 274, + 564, + 319 + ], + "type": "text", + "content": "[672] F. Faal, K. Schmitt, and J. Y. Yu, \"Reward modeling for mitigating toxicity in transformer-based language models,\" Applied Intelligence, vol. 53, no. 7, p. 8421-8435, 2022." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 319, + 564, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 319, + 564, + 376 + ], + "spans": [ + { + "bbox": [ + 310, + 319, + 564, + 376 + ], + "type": "text", + "content": "[673] W. Zeng, Y. Liu, R. Mullins, L. Peran, J. Fernandez, H. Harkous, K. Narasimhan, D. Proud, P. Kumar, B. Radharapu et al., \"Shieldgemma: Generative ai content moderation based on gemma,\" arXiv preprint arXiv:2407.21772, 2024." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 376, + 564, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 376, + 564, + 435 + ], + "spans": [ + { + "bbox": [ + 310, + 376, + 564, + 435 + ], + "type": "text", + "content": "[674] Z. Wang, F. Yang, L. Wang, P. Zhao, H. Wang, L. Chen, *ards/, Q. Lin, and K.-F. Wong, \"SELF-GUARD: Empower the LLM to safeguard itself,\" in *Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics*, 2024, pp. 1648-1668." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 435, + 564, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 435, + 564, + 480 + ], + "spans": [ + { + "bbox": [ + 310, + 435, + 564, + 480 + ], + "type": "text", + "content": "[675] S. Ghosh, P. Varshney, E. Galinkin, and C. Parisien, \"Aegis: Online adaptive ai content safety moderation with ensemble of llm experts,\" arXiv preprint arXiv:2404.05993, 2024." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 481, + 564, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 481, + 564, + 561 + ], + "spans": [ + { + "bbox": [ + 310, + 481, + 564, + 561 + ], + "type": "text", + "content": "[676] W. Wang, J.-T. Huang, W. Wu, J. Zhang, Y. Huang, S. Li, P. He, and M. R. Lyu, \"Mttm: Metamorphic testing for textual content moderation software,\" 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE), pp. 2387-2399, 2023. [Online]. Available: https://api-semanticscholar.org/ CorpusID:256826966" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 561, + 564, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 561, + 564, + 596 + ], + "spans": [ + { + "bbox": [ + 310, + 561, + 564, + 596 + ], + "type": "text", + "content": "[677] K.-L. Chiu, A. Collins, and R. 
Alexander, \"Detecting hate speech with gpt-3,\" arXiv preprint arXiv:2103.12407, 2021." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 596, + 564, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 596, + 564, + 642 + ], + "spans": [ + { + "bbox": [ + 310, + 596, + 564, + 642 + ], + "type": "text", + "content": "[678] J. Kim, A. Derakhshan, and I. G. Harris, \"Robust safety classifier for large language models: Adversarial prompt shield,\" arXiv preprint arXiv:2311.00172, 2023." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 642, + 564, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 642, + 564, + 700 + ], + "spans": [ + { + "bbox": [ + 310, + 642, + 564, + 700 + ], + "type": "text", + "content": "[679] B. Krause, A. D. Gotmare, B. McCann, N. S. Keskar, S. Joty, R. Socher, and N. F. Rajani, \"Gedi: Generative discriminator guided sequence generation,\" in Findings of the Association for Computational Linguistics: EMNLP 2021, 2021, pp. 4929-4952." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 700, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 700, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 700, + 564, + 746 + ], + "type": "text", + "content": "[680] Q. Liu, Z. Zhou, L. He, Y. Liu, W. Zhang, and S. Su, \"Alignment-enhanced decoding: Defending jailbreaks via token-level adaptive refining of probability distributions,\" in Proceedings of the 2024 Conference on" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "59" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 58 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 72, + 42, + 301, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 42, + 301, + 65 + ], + "spans": [ + { + "bbox": [ + 72, + 42, + 301, + 65 + ], + "type": "text", + "content": "Empirical Methods in Natural Language Processing, 2024, pp. 2802-2816." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 66, + 301, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 66, + 301, + 133 + ], + "spans": [ + { + "bbox": [ + 47, + 66, + 301, + 133 + ], + "type": "text", + "content": "[681] A. Liu, M. Sap, X. Lu, S. Swayamdipta, C. Bhagavatula, N. A. Smith, and Y. Choi, \"Dexperts: Decoding-time controlled text generation with experts and anti-experts,\" in Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics, 2021, pp. 6691-6706." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 134, + 301, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 134, + 301, + 179 + ], + "spans": [ + { + "bbox": [ + 47, + 134, + 301, + 179 + ], + "type": "text", + "content": "[682] T. Radcliffe, E. Lockhart, and J. Wetherington, \"Automated prompt engineering for semantic vulnerabilities in large language models,\" Authorea Preprints, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 180, + 301, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 180, + 301, + 227 + ], + "spans": [ + { + "bbox": [ + 47, + 180, + 301, + 227 + ], + "type": "text", + "content": "[683] F. Trad and A. Chehab, \"Prompt engineering or finetuning? a case study on phishing detection with large language models,\" Machine Learning and Knowledge Extraction, vol. 6, no. 1, pp. 367-384, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 228, + 301, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 228, + 301, + 284 + ], + "spans": [ + { + "bbox": [ + 47, + 228, + 301, + 284 + ], + "type": "text", + "content": "[684] A. Zhou, B. Li, and H. Wang, \"Robust prompt optimization for defending language models against jailbreaking attacks,\" in Advances in Neural Information Processing Systems, vol. 37. Curran Associates, Inc., 2024, pp. 40184-40211." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 285, + 301, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 285, + 301, + 331 + ], + "spans": [ + { + "bbox": [ + 47, + 285, + 301, + 331 + ], + "type": "text", + "content": "[685] Y. Mo, Y. Wang, Z. Wei, and Y. Wang, \"Fight back against jailbreaking via prompt adversarial tuning,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 331, + 301, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 331, + 301, + 376 + ], + "spans": [ + { + "bbox": [ + 47, + 331, + 301, + 376 + ], + "type": "text", + "content": "[686] Y. Zhang, L. Ding, L. Zhang, and D. Tao, \"Intention analysis makes lms a good jailbreak defender,\" in Proceedings of the 31st International Conference on Computational Linguistics, 2025, pp. 2947-2968." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 377, + 301, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 377, + 301, + 422 + ], + "spans": [ + { + "bbox": [ + 47, + 377, + 301, + 422 + ], + "type": "text", + "content": "[687] Y. Chen, H. Li, Z. Zheng, Y. Song, D. Wu, and B. Hooi, \"Defense against prompt injection attack by leveraging attack techniques,\" arXiv preprint arXiv:2411.00459, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 423, + 301, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 423, + 301, + 480 + ], + "spans": [ + { + "bbox": [ + 47, + 423, + 301, + 480 + ], + "type": "text", + "content": "[688] Z. Zhang, J. Yang, P. Ke, F. Mi, H. Wang, and M. Huang, \"Defending large language models against jailbreaking attacks through goal prioritization,\" in Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics, 2023, pp. 8865-8887." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 481, + 301, + 527 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 481, + 301, + 527 + ], + "spans": [ + { + "bbox": [ + 47, + 481, + 301, + 527 + ], + "type": "text", + "content": "[689] Y. Xie, J. Yi, J. Shao, J. Curl, L. Lyu, Q. Chen, X. Xie, and F. Wu, \"Defending chatgpt against jailbreak attack via self-reminders,\" Nature Machine Intelligence, vol. 5, no. 12, pp. 1486–1496, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 527, + 301, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 527, + 301, + 561 + ], + "spans": [ + { + "bbox": [ + 47, + 527, + 301, + 561 + ], + "type": "text", + "content": "[690] S. Chen, J. Piet, C. Sitawarin, and D. Wagner, \"Struq: Defending against prompt injection with structured queries,\" arXiv preprint arXiv:2402.06363, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 562, + 301, + 607 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 562, + 301, + 607 + ], + "spans": [ + { + "bbox": [ + 47, + 562, + 301, + 607 + ], + "type": "text", + "content": "[691] K. Hines, G. Lopez, M. Hall, F. Zarfati, Y. Zunger, and E. Kiciman, \"Defending against indirect prompt injection attacks with spotlighting,\" arXiv preprint arXiv:2403.14720, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 608, + 301, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 608, + 301, + 653 + ], + "spans": [ + { + "bbox": [ + 47, + 608, + 301, + 653 + ], + "type": "text", + "content": "[692] S. Slocum and D. Hadfield-Menell, \"Inverse prompt engineering for task-specific LLM safety,\" 2025. [Online]. Available: https://openreview.net/forum? id=3MDmM0rMPQ" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 654, + 301, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 654, + 301, + 688 + ], + "spans": [ + { + "bbox": [ + 47, + 654, + 301, + 688 + ], + "type": "text", + "content": "[693] K. Edemacu and X. Wu, \"Privacy preserving prompt engineering: A survey,\" arXiv preprint arXiv:2404.06001, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 689, + 301, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 301, + 734 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 301, + 734 + ], + "type": "text", + "content": "[694] S. Utpala, S. Hooker, and P.-Y. Chen, \"Locally differentially private document generation using zero shot prompting,\" in Findings of the Association for Computational Linguistics: EMNLP 2023, 2023, pp. 8442-8457." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 735, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 735, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 735, + 301, + 746 + ], + "type": "text", + "content": "[695] H. Duan, A. Dziedzic, N. Papernot, and F. Boenisch," + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 311, + 42, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 335, + 42, + 564, + 87 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 564, + 87 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 564, + 87 + ], + "type": "text", + "content": "\"Flocks of stochastic parrots: Differentially private prompt learning for large language models,\" Advances in Neural Information Processing Systems, vol. 36, pp. 76852-76871, 2023." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 311, + 88, + 564, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 88, + 564, + 157 + ], + "spans": [ + { + "bbox": [ + 311, + 88, + 564, + 157 + ], + "type": "text", + "content": "[696] W. Wang, W. Jiao, J. Huang, R. Dai, J.-T. Huang, Z. Tu, and M. R. Lyu, \"Not all countries celebrate thanksgiving: On the cultural dominance in large language models,\" ArXiv, vol. abs/2310.12481, 2023. [Online]. Available: https://api_semanticscholar.org/ CorpusID:264305810" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 311, + 158, + 564, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 158, + 564, + 203 + ], + "spans": [ + { + "bbox": [ + 311, + 158, + 564, + 203 + ], + "type": "text", + "content": "[697] M. Kaneko, D. Bollegala, N. Okazaki, and T. Baldwin, \"Evaluating gender bias in large language models via chain-of-thought prompting,\" arXiv preprint arXiv:2401.15585, 2024." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 311, + 204, + 564, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 204, + 564, + 262 + ], + "spans": [ + { + "bbox": [ + 311, + 204, + 564, + 262 + ], + "type": "text", + "content": "[698] X. He, S. Zannettou, Y. Shen, and Y. Zhang, \"You only prompt once: On the capabilities of prompt learning on large language models to tackle toxic content,\" in 2024 IEEE Symposium on Security and Privacy (SP). IEEE, 2024, pp. 770-787." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 311, + 262, + 564, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 262, + 564, + 296 + ], + "spans": [ + { + "bbox": [ + 311, + 262, + 564, + 296 + ], + "type": "text", + "content": "[699] X. Zou, Y. Chen, and K. Li, \"Is the system message really important to jailbreaks in large language models?\" arXiv preprint arXiv:2402.14857, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 311, + 297, + 564, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 297, + 564, + 342 + ], + "spans": [ + { + "bbox": [ + 311, + 297, + 564, + 342 + ], + "type": "text", + "content": "[700] R. Xu, Z. Qi, and W. Xu, \"Preemptive answer \"attacks\" on chain-of-thought reasoning,\" in Findings of the Association for Computational Linguistics ACL 2024, 2024, pp. 14708-14726." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 311, + 342, + 564, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 342, + 564, + 412 + ], + "spans": [ + { + "bbox": [ + 311, + 342, + 564, + 412 + ], + "type": "text", + "content": "[701] C. Zheng, F. Yin, H. Zhou, F. Meng, J. Zhou, K.-W. Chang, M. Huang, and N. Peng, \"On prompt-driven safeguarding for large language models,\" in Proceedings of the 41st International Conference on Machine Learning, ser. Proceedings of Machine Learning Research, vol. 235, 21-27 Jul 2024, pp. 61-613." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 311, + 412, + 564, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 412, + 564, + 469 + ], + "spans": [ + { + "bbox": [ + 311, + 412, + 564, + 469 + ], + "type": "text", + "content": "[702] Y. Wang, X. Liu, Y. Li, M. Chen, and C. Xiao, \"Adashield: Safeguarding multimodal large language models from structure-based attack via adaptive shield prompting,\" in European Conference on Computer Vision. Springer, 2024, pp. 77-94." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 311, + 470, + 564, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 470, + 564, + 514 + ], + "spans": [ + { + "bbox": [ + 311, + 470, + 564, + 514 + ], + "type": "text", + "content": "[703] Z. Shi, Z. Wang, Y. Su, W. Luo, H. Gao, F. Yang, R. Tang, and Y. Zhang, \"Robustness-aware automatic prompt optimization,\" arXiv preprint arXiv:2412.18196, 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 311, + 515, + 564, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 515, + 564, + 573 + ], + "spans": [ + { + "bbox": [ + 311, + 515, + 564, + 573 + ], + "type": "text", + "content": "[704] Y. Wu, Y. Gao, B. Zhu, Z. Zhou, X. Sun, S. Yang, J.-G. Lou, Z. Ding, and L. Yang, \"Strago: Harnessing strategic guidance for prompt optimization,\" in Findings of the Association for Computational Linguistics: EMNLP 2024, 2024, pp. 10043-10061." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 311, + 574, + 564, + 618 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 574, + 564, + 618 + ], + "spans": [ + { + "bbox": [ + 311, + 574, + 564, + 618 + ], + "type": "text", + "content": "[705] F. Wu, N. Zhang, S. Jha, P. McDaniel, and C. Xiao, \"A new era in llm security: Exploring security concerns in real-world llm-based systems,\" arXiv preprint arXiv:2402.18649, 2024." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 311, + 619, + 564, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 619, + 564, + 689 + ], + "spans": [ + { + "bbox": [ + 311, + 619, + 564, + 689 + ], + "type": "text", + "content": "[706] A. Borzunov, M. Ryabinin, A. Chumachenko, D. Baranchuk, T. Dettmers, Y. Belkada, P. Samygin, and C. A. Raffel, \"Distributed inference and finetuning of large language models over the internet,\" Advances in neural information processing systems, vol. 36, pp. 12312-12331, 2023." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 311, + 689, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 689, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 311, + 689, + 564, + 746 + ], + "type": "text", + "content": "[707] A. Agrawal, N. Kedia, A. Panwar, J. Mohan, N. Kwa-tra, B. Gulavani, A. Tumanov, and R. Ramjee, \"Taming {Throughput-Latency} tradeoff in {LLM} inference with {Sarathi-Serve}\", in 18th USENIX Symposium on Operating Systems Design and Implementation (OSDI" + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "60" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 59 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 71, + 42, + 167, + 53 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 167, + 53 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 167, + 53 + ], + "type": "text", + "content": "24), 2024, pp. 117-134." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 54, + 301, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 54, + 301, + 123 + ], + "spans": [ + { + "bbox": [ + 47, + 54, + 301, + 123 + ], + "type": "text", + "content": "[708] Y. Zhong, S. Liu, J. Chen, J. Hu, Y. Zhu, X. Liu, X. Jin, and H. Zhang, \" " + }, + { + "bbox": [ + 47, + 54, + 301, + 123 + ], + "type": "inline_equation", + "content": "\\{\\mathrm{DistServe}\\}" + }, + { + "bbox": [ + 47, + 54, + 301, + 123 + ], + "type": "text", + "content": ": Disaggregating prefill and decoding for goodput-optimized large language model serving,\" in 18th USENIX Symposium on Operating Systems Design and Implementation (OSDI 24), 2024, pp. 193-210." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 124, + 301, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 124, + 301, + 169 + ], + "spans": [ + { + "bbox": [ + 47, + 124, + 301, + 169 + ], + "type": "text", + "content": "[709] H. Sun, Z. Chen, X. Yang, Y. Tian, and B. Chen, \"Tri force: Lossless acceleration of long sequence generation with hierarchical speculative decoding,\" in First Conference on Language Modeling, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 170, + 301, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 170, + 301, + 227 + ], + "spans": [ + { + "bbox": [ + 47, + 170, + 301, + 227 + ], + "type": "text", + "content": "[710] T. Cai, Y. Li, Z. Geng, H. Peng, J. D. Lee, D. Chen, and T. Dao, \"Medusa: Simple LLM inference acceleration framework with multiple decoding heads,\" in Proceedings of the 41st International Conference on Machine Learning, vol. 235. PMLR, 2024, pp. 5209-5235." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 228, + 301, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 228, + 301, + 284 + ], + "spans": [ + { + "bbox": [ + 47, + 228, + 301, + 284 + ], + "type": "text", + "content": "[711] J. Chen, V. Tiwari, R. Sadhukhan, Z. Chen, J. Shi, I. E.-H. Yen, and B. Chen, \"Magicdec: Breaking the latency-throughput tradeoff for long context generation with speculative decoding,\" arXiv preprint arXiv:2408.11049, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 285, + 301, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 285, + 301, + 353 + ], + "spans": [ + { + "bbox": [ + 47, + 285, + 301, + 353 + ], + "type": "text", + "content": "[712] C. Holmes, M. Tanaka, M. Wyatt, A. A. Awan, J. Rasley, S. Rajbhandari, R. Y. Aminabadi, H. Qin, A. Bakhtiari, L. Kurilenko et al., \"Deepspeed-fastgen: High-throughput text generation for llms via mii and deepspeed-inference,\" arXiv preprint arXiv:2401.08671, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 354, + 301, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 354, + 301, + 412 + ], + "spans": [ + { + "bbox": [ + 47, + 354, + 301, + 412 + ], + "type": "text", + "content": "[713] R. Svirschevski, A. May, Z. Chen, B. Chen, Z. Jia, and M. 
Ryabinin, \"Specexec: Massively parallel speculative decoding for interactive lmm inference on consumer devices,\" Advances in Neural Information Processing Systems, vol. 37, pp. 16342-16368, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 412, + 301, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 412, + 301, + 480 + ], + "spans": [ + { + "bbox": [ + 47, + 412, + 301, + 480 + ], + "type": "text", + "content": "[714] P. Wang, D. Zhang, L. Li, C. Tan, X. Wang, M. Zhang, K. Ren, B. Jiang, and X. Qiu, \"Inferaligner: Inference-time alignment for harmlessness through cross-model guidance,\" in Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, 2024, pp. 10460-10479." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 481, + 301, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 481, + 301, + 526 + ], + "spans": [ + { + "bbox": [ + 47, + 481, + 301, + 526 + ], + "type": "text", + "content": "[715] X. Wang, D. Wu, Z. Ji, Z. Li, P. Ma, S. Wang, Y. Li, Y. Liu, N. Liu, and J. Rahmel, \"Selfdefend: Llms can defend themselves against jailbreaking in a practical manner,\" CoRR, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 526, + 301, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 526, + 301, + 572 + ], + "spans": [ + { + "bbox": [ + 47, + 526, + 301, + 572 + ], + "type": "text", + "content": "[716] X. Hu, P.-Y. Chen, and T.-Y. Ho, \"Gradient cuff: Detecting jailbreak attacks on large language models by exploring refusal loss landscapes,\" arXiv preprint arXiv:2403.00867, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 573, + 301, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 573, + 301, + 608 + ], + "spans": [ + { + "bbox": [ + 47, + 573, + 301, + 608 + ], + "type": "text", + "content": "[717] R. K. Sharma, V. Gupta, and D. 
Grossman, \"Spml: A dsl for defending language models against prompt attacks,\" arXiv preprint arXiv:2402.11755, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 608, + 301, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 608, + 301, + 677 + ], + "spans": [ + { + "bbox": [ + 47, + 608, + 301, + 677 + ], + "type": "text", + "content": "[718] J. Zhao, S. Wang, Y. Zhao, X. Hou, K. Wang, P. Gao, Y. Zhang, C. Wei, and H. Wang, \"Models are codes: Towards measuring malicious code poisoning attacks on pre-trained model hubs,\" in Proceedings of the 39th IEEE/ACM International Conference on Automated Software Engineering, 2024, pp. 2087-2098." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 677, + 301, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 301, + 723 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 301, + 723 + ], + "type": "text", + "content": "[719] S. Ghosh, P. Varshney, E. Galinkin, and C. Parisien, \"Aegis: Online adaptive ai content safety moderation with ensemble of llm experts,\" arXiv preprint arXiv:2404.05993, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 723, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 723, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 723, + 301, + 746 + ], + "type": "text", + "content": "[720] S. Ghosh, P. Varshney, M. N. Sreedhar, A. Padmakumar, T. Rebedea, J. R. Varghese, and C. 
Parisien," + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 335, + 42, + 564, + 77 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 564, + 77 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 564, + 77 + ], + "type": "text", + "content": "\"Aegis2.0: A diverse ai safety dataset and risks taxonomy for alignment of llm guardrails,\" in Neurips Safe Generative AI Workshop 2024, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 310, + 77, + 564, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 77, + 564, + 123 + ], + "spans": [ + { + "bbox": [ + 310, + 77, + 564, + 123 + ], + "type": "text", + "content": "[721] S. Han, K. Rao, A. Ettinger, L. Jiang, B. Y. Lin, N. Lambert, Y. Choi, and N. Dziri, \"Wildguard: Open one-stop moderation tools for safety risks, jailbreaks, and refusals of llms,\" arXiv preprint arXiv:2406.18495, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 124, + 564, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 124, + 564, + 180 + ], + "spans": [ + { + "bbox": [ + 310, + 124, + 564, + 180 + ], + "type": "text", + "content": "[722] W. Zeng, Y. Liu, R. Mullins, L. Peran, J. Fernandez, H. Harkous, K. Narasimhan, D. Proud, P. Kumar, B. Radharapu et al., \"Shieldgemma: Generative ai content moderation based on gemma,\" arXiv preprint arXiv:2407.21772, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 181, + 564, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 181, + 564, + 227 + ], + "spans": [ + { + "bbox": [ + 310, + 181, + 564, + 227 + ], + "type": "text", + "content": "[723] Y. Liu, H. Gao, S. Zhai, J. Xia, T. Wu, Z. Xue, Y. Chen, K. Kawaguchi, J. Zhang, and B. 
Hooi, \"Guardreasoner: Towards reasoning-based llm safeguards,\" arXiv preprint arXiv:2501.18492, 2025." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 228, + 564, + 261 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 228, + 564, + 261 + ], + "spans": [ + { + "bbox": [ + 310, + 228, + 564, + 261 + ], + "type": "text", + "content": "[724] C. Wang, Y. Liu, B. Li, D. Zhang, Z. Li, and J. Fang, \"Safety in large reasoning models: A survey,\" arXiv preprint arXiv:2504.17704, 2025." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 262, + 564, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 262, + 564, + 308 + ], + "spans": [ + { + "bbox": [ + 310, + 262, + 564, + 308 + ], + "type": "text", + "content": "[725] H. Jin, A. Zhou, J. Menke, and H. Wang, \"Jailbreaking large language models against moderation guardrails via cipher characters,\" Advances in Neural Information Processing Systems, vol. 37, pp. 59408-59435, 2024." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 308, + 564, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 308, + 564, + 354 + ], + "spans": [ + { + "bbox": [ + 310, + 308, + 564, + 354 + ], + "type": "text", + "content": "[726] D. Ran, J. Liu, Y. Gong, J. Zheng, X. He, T. Cong, and A. Wang, \"Jailbreak: An integrated toolkit for evaluating jailbreak attempts against large language models,\" arXiv preprint arXiv:2406.09321, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 354, + 564, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 354, + 564, + 399 + ], + "spans": [ + { + "bbox": [ + 310, + 354, + 564, + 399 + ], + "type": "text", + "content": "[727] H. Qiu, S. Zhang, A. Li, H. He, and Z. Lan, \"Latent jailbreak: A benchmark for evaluating text safety and output robustness of large language models,\" arXiv preprint arXiv:2307.08487, 2023." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 399, + 564, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 399, + 564, + 469 + ], + "spans": [ + { + "bbox": [ + 310, + 399, + 564, + 469 + ], + "type": "text", + "content": "[728] K. Zhu, J. Wang, J. Zhou, Z. Wang, H. Chen, Y. Wang, L. Yang, W. Ye, Y. Zhang, N. Gong et al., \"Promptrobust: Towards evaluating the robustness of large language models on adversarial prompts,\" in Proceedings of the 1st ACM Workshop on Large AI Systems and Models with Privacy and Safety Analysis, 2023, pp. 57-68." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 470, + 564, + 526 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 470, + 564, + 526 + ], + "spans": [ + { + "bbox": [ + 310, + 470, + 564, + 526 + ], + "type": "text", + "content": "[729] A. Pei, Z. Yang, S. Zhu, R. Cheng, and J. Jia, \"Selfprompt: Autonomously evaluating llm robustness via domain-constrained knowledge guidelines and refined adversarial prompts,\" arXiv preprint arXiv:2412.00765, 2024." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 526, + 564, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 526, + 564, + 572 + ], + "spans": [ + { + "bbox": [ + 310, + 526, + 564, + 572 + ], + "type": "text", + "content": "[730] Z. Xu, Y. Liu, G. Deng, Y. Li, and S. Picek, \"A comprehensive study of jailbreak attack versus defense for large language models,\" arXiv preprint arXiv:2402.13457, 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 573, + 564, + 618 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 573, + 564, + 618 + ], + "spans": [ + { + "bbox": [ + 310, + 573, + 564, + 618 + ], + "type": "text", + "content": "[731] K. Chen, Y. Liu, D. Wang, J. Chen, and W. 
Wang, \"Characterizing and evaluating the reliability of llms against jailbreak attacks,\" arXiv preprint arXiv:2408.09326, 2024." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 619, + 564, + 676 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 619, + 564, + 676 + ], + "spans": [ + { + "bbox": [ + 310, + 619, + 564, + 676 + ], + "type": "text", + "content": "[732] B. Wang, C. Xu, S. Wang, Z. Gan, Y. Cheng, J. Gao, A. H. Awadallah, and B. Li, \"Adversarial glue: A multi-task benchmark for robustness evaluation of language models,\" arXiv preprint arXiv:2111.02840, 2021." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 677, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 677, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 677, + 564, + 746 + ], + "type": "text", + "content": "[733] G. Dong, J. Zhao, T. Hui, D. Guo, W. Wang, B. Feng, Y. Qiu, Z. Gongque, K. He, Z. Wang et al., \"Revisit input perturbation problems for llms: A unified robustness evaluation framework for noisy slot filling task,\" in CCF International Conference on Natural Language Processing and Chinese Computing. Springer," + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "type": "text", + "content": "61" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 60 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 71, + 42, + 149, + 54 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 149, + 54 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 149, + 54 + ], + "type": "text", + "content": "2023, pp. 682-694." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 55, + 301, + 88 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 55, + 301, + 88 + ], + "spans": [ + { + "bbox": [ + 47, + 55, + 301, + 88 + ], + "type": "text", + "content": "[734] J. Zheng, A. Ritter, and W. Xu, \"Neo-bench: Evaluating robustness of large language models with neologisms,\" arXiv preprint arXiv:2402.12261, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 89, + 301, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 89, + 301, + 133 + ], + "spans": [ + { + "bbox": [ + 47, + 89, + 301, + 133 + ], + "type": "text", + "content": "[735] Y. Li, Y. Guo, F. Guerin, and C. Lin, \"Evaluating large language models for generalization and robustness via data compression,\" arXiv preprint arXiv:2402.00861, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 134, + 301, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 134, + 301, + 191 + ], + "spans": [ + { + "bbox": [ + 47, + 134, + 301, + 191 + ], + "type": "text", + "content": "[736] Q. Zhang, H. Qiu, D. Wang, Y. Li, T. Zhang, W. Zhu, H. Weng, L. Yan, and C. 
Zhang, “A benchmark for semantic sensitive information in llms outputs,” in The Thirteenth International Conference on Learning Representations." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 192, + 301, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 192, + 301, + 239 + ], + "spans": [ + { + "bbox": [ + 47, + 192, + 301, + 239 + ], + "type": "text", + "content": "[737] A. Wang, A. Singh, J. Michael, F. Hill, O. Levy, and S. R. Bowman, \"Glue: A multi-task benchmark and analysis platform for natural language understanding,\" arXiv preprint arXiv:1804.07461, 2018." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 239, + 301, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 239, + 301, + 285 + ], + "spans": [ + { + "bbox": [ + 47, + 239, + 301, + 285 + ], + "type": "text", + "content": "[738] J. Li, X. Cheng, W. X. Zhao, J.-Y. Nie, and J.-R. Wen, \"Halueval: A large-scale hallucination evaluation benchmark for large language models,\" arXiv preprint arXiv:2305.11747, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 285, + 301, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 285, + 301, + 319 + ], + "spans": [ + { + "bbox": [ + 47, + 285, + 301, + 319 + ], + "type": "text", + "content": "[739] A. Pal, L. K. Umapathi, and M. Sankarasubbu, \"Med-halt: Medical domain hallucination test for large language models,\" arXiv preprint arXiv:2307.15343, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 319, + 301, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 319, + 301, + 365 + ], + "spans": [ + { + "bbox": [ + 47, + 319, + 301, + 365 + ], + "type": "text", + "content": "[740] Z. Ji, Y. Gu, W. Zhang, C. Lyu, D. Lin, and K. Chen, \"Anah: Analytical annotation of hallucinations in large language models,\" arXiv preprint arXiv:2405.20315, 2024." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 365, + 301, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 365, + 301, + 411 + ], + "spans": [ + { + "bbox": [ + 47, + 365, + 301, + 411 + ], + "type": "text", + "content": "[741] P. Manakul, A. Liusie, and M. J. Gales, \"Selfcheck-gpt: Zero-resource black-box hallucination detection for generative large language models,\" arXiv preprint arXiv:2303.08896, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 412, + 301, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 412, + 301, + 458 + ], + "spans": [ + { + "bbox": [ + 47, + 412, + 301, + 458 + ], + "type": "text", + "content": "[742] Y.-S. Chuang, Y. Xie, H. Luo, Y. Kim, J. Glass, and P. He, \"Dola: Decoding by contrasting layers improves factuality in large language models,\" arXiv preprint arXiv:2309.03883, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 458, + 301, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 458, + 301, + 504 + ], + "spans": [ + { + "bbox": [ + 47, + 458, + 301, + 504 + ], + "type": "text", + "content": "[743] N. Mündler, J. He, S. Jenko, and M. Vechev, \"Self-contradictory hallucinations of large language models: Evaluation, detection and mitigation,\" arXiv preprint arXiv:2305.15852, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 504, + 301, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 504, + 301, + 559 + ], + "spans": [ + { + "bbox": [ + 47, + 504, + 301, + 559 + ], + "type": "text", + "content": "[744] M. Elaraby, M. Lu, J. Dunn, X. Zhang, Y. Wang, S. Liu, P. Tian, Y. Wang, and Y. Wang, \"Halo: Estimation and reduction of hallucinations in open-source weak large language models,\" arXiv preprint arXiv:2308.11764, 2023." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 561, + 301, + 607 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 561, + 301, + 607 + ], + "spans": [ + { + "bbox": [ + 47, + 561, + 301, + 607 + ], + "type": "text", + "content": "[745] Z. Ji, D. Chen, E. Ishii, S. Cahyawijaya, Y. Bang, B. Wilie, and P. Fung, \"Llm internal states reveal hallucination risk faced with a query,\" arXiv preprint arXiv:2407.03282, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 608, + 301, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 608, + 301, + 654 + ], + "spans": [ + { + "bbox": [ + 47, + 608, + 301, + 654 + ], + "type": "text", + "content": "[746] J. Wei, Y. Yao, J.-F. Ton, H. Guo, A. Estornell, and Y. Liu, \"Measuring and reducing llm hallucination without gold-standard answers,\" arXiv preprint arXiv:2402.10412, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 654, + 301, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 654, + 301, + 700 + ], + "spans": [ + { + "bbox": [ + 47, + 654, + 301, + 700 + ], + "type": "text", + "content": "[747] A. Deshpande, V. Murahari, T. Rajpurohit, A. Kalyan, and K. Narasimhan, \"Toxicity in chatgpt: Analyzing persona-assigned language models,\" arXiv preprint arXiv:2304.05335, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 700, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 700, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 700, + 301, + 746 + ], + "type": "text", + "content": "[748] A. de Wynter, I. Watts, T. Wongsangaroonsri, M. Zhang, N. Farra, N. E. Altintoprak, L. Baur, S. Claudet, P. Gajdusek, C. 
Gören et al., \"Rtp-lx: Can llms evaluate toxicity in multilingual scenarios?\"" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 566, + 746 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 335, + 42, + 492, + 54 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 492, + 54 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 492, + 54 + ], + "type": "text", + "content": "arXiv preprint arXiv:2404.14397, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 54, + 564, + 111 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 54, + 564, + 111 + ], + "spans": [ + { + "bbox": [ + 310, + 54, + 564, + 111 + ], + "type": "text", + "content": "[749] D. Esiobu, X. Tan, S. Hosseini, M. Ung, Y. Zhang, J. Fernandes, J. Dwivedi-Yu, E. Presani, A. Williams, and E. M. Smith, \"Robbie: Robust bias evaluation of large generative language models,\" arXiv preprint arXiv:2311.18140, 2023." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 112, + 564, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 112, + 564, + 157 + ], + "spans": [ + { + "bbox": [ + 310, + 112, + 564, + 157 + ], + "type": "text", + "content": "[750] S. Wang, P. Wang, T. Zhou, Y. Dong, Z. Tan, and J. Li, \"Ceb: Compositional evaluation benchmark for fairness in large language models,\" arXiv preprint arXiv:2407.02408, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 158, + 564, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 158, + 564, + 204 + ], + "spans": [ + { + "bbox": [ + 310, + 158, + 564, + 204 + ], + "type": "text", + "content": "[751] H. Li, D. Guo, D. Li, W. Fan, Q. Hu, X. Liu, C. Chan, D. Yao, Y. Yao, and Y. Song, \"Privlm-bench: A multi-level privacy evaluation benchmark for language models,\" arXiv preprint arXiv:2311.04044, 2023." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 205, + 564, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 205, + 564, + 250 + ], + "spans": [ + { + "bbox": [ + 310, + 205, + 564, + 250 + ], + "type": "text", + "content": "[752] Q. Li, J. Hong, C. Xie, J. Tan, R. Xin, J. Hou, X. Yin, Z. Wang, D. Hendrycks, Z. Wang et al., \"Llm-pbe: Assessing data privacy in large language models,\" arXiv preprint arXiv:2408.12787, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 251, + 564, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 251, + 564, + 308 + ], + "spans": [ + { + "bbox": [ + 310, + 251, + 564, + 308 + ], + "type": "text", + "content": "[753] D. Zhu, D. Chen, X. Wu, J. Geng, Z. Li, J. Grossklags, and L. Ma, \"Privauditor: Benchmarking data protection vulnerabilities in llm adaptation techniques,\" Advances in Neural Information Processing Systems, vol. 37, pp. 9668-9689, 2024." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 308, + 564, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 308, + 564, + 354 + ], + "spans": [ + { + "bbox": [ + 310, + 308, + 564, + 354 + ], + "type": "text", + "content": "[754] L. Rossi, B. Marek, V. Hanke, X. Wang, M. Backes, A. Dziedzic, and F. Boenisch, \"Auditing empirical privacy protection of private llm adaptations,\" in Neurips Safe Generative AI Workshop 2024." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 354, + 566, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 354, + 566, + 411 + ], + "spans": [ + { + "bbox": [ + 310, + 354, + 566, + 411 + ], + "type": "text", + "content": "[755] T. Singh, H. Aditya, V. K. Madisetti, and A. Bahga, \"Whispered tuning: Data privacy preservation in finetuning llms through differential privacy,\" Journal of Software Engineering and Applications, vol. 17, no. 1, pp. 1-22, 2024." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 412, + 564, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 412, + 564, + 458 + ], + "spans": [ + { + "bbox": [ + 310, + 412, + 564, + 458 + ], + "type": "text", + "content": "[756] H. Li, W. Hu, H. Jing, Y. Chen, Q. Hu, S. Han, T. Chu, P. Hu, and Y. Song, \"Privaci-bench: Evaluating privacy with contextual integrity and legal compliance,\" arXiv preprint arXiv:2502.17041, 2025." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 458, + 564, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 458, + 564, + 493 + ], + "spans": [ + { + "bbox": [ + 310, + 458, + 564, + 493 + ], + "type": "text", + "content": "[757] O. Cartwright, H. Dunbar, and T. Radcliffe, “Evaluating privacy compliance in commercial large language models-chatgpt, claude, and gemini,” 2024." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 493, + 564, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 493, + 564, + 550 + ], + "spans": [ + { + "bbox": [ + 310, + 493, + 564, + 550 + ], + "type": "text", + "content": "[758] X. Zhou, M. Weyssow, R. Widyasari, T. Zhang, J. He, Y. Lyu, J. Chang, B. Zhang, D. Huang, and D. Lo, \"Lessleak-bench: A first investigation of data leakage in llms across 83 software engineering benchmarks,\" arXiv preprint arXiv:2502.06215, 2025." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 550, + 564, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 550, + 564, + 608 + ], + "spans": [ + { + "bbox": [ + 310, + 550, + 564, + 608 + ], + "type": "text", + "content": "[759] Y. Song, R. Liu, S. Chen, Q. Ren, Y. Zhang, and Y. Yu, \"Securesql: Evaluating data leakage of large language models as natural language interfaces to databases,\" in Findings of the Association for Computational Linguistics: EMNLP 2024, 2024, pp. 5975-5990." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 608, + 564, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 608, + 564, + 665 + ], + "spans": [ + { + "bbox": [ + 310, + 608, + 564, + 665 + ], + "type": "text", + "content": "[760] X. Liu, Y. Zhu, J. Gu, Y. Lan, C. Yang, and Y. Qiao, \"Mm-safetybench: A benchmark for safety evaluation of multimodal large language models,\" in European Conference on Computer Vision. Springer, 2024, pp. 386-403." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 666, + 564, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 666, + 564, + 723 + ], + "spans": [ + { + "bbox": [ + 310, + 666, + 564, + 723 + ], + "type": "text", + "content": "[761] W. Luo, S. Ma, X. Liu, X. Guo, and C. Xiao, \"Jailbreakv-28k: A benchmark for assessing the robustness of multimodal large language models against jailbreak attacks,\" arXiv e-prints, pp. arXiv-2404, 2024." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 723, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 723, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 723, + 564, + 746 + ], + "type": "text", + "content": "[762] F. Weng, Y. Xu, C. Fu, and W. Wang, \"A comprehensive study on jailbreak attacks and defenses for" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "62" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 61 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "type": "text", + "content": "multimodal large language models,\" arXiv preprint arXiv:2408.08464, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 66, + 301, + 100 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 66, + 301, + 100 + ], + "spans": [ + { + "bbox": [ + 47, + 66, + 301, + 100 + ], + "type": "text", + "content": "[763] Z. Li, P.-Y. Chen, and T.-Y. Ho, \"Retention score: Quantifying jailbreak risks for vision language models,\" arXiv preprint arXiv:2412.17544, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 100, + 301, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 100, + 301, + 180 + ], + "spans": [ + { + "bbox": [ + 47, + 100, + 301, + 180 + ], + "type": "text", + "content": "[764] T. Guan, F. Liu, X. Wu, R. Xian, Z. Li, X. Liu, X. Wang, L. Chen, F. Huang, Y. Yacoob et al., \"Hallusionbench: an advanced diagnostic suite for entangled language hallucination and visual illusion in large vision-language models,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 14375-14385." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 180, + 301, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 180, + 301, + 226 + ], + "spans": [ + { + "bbox": [ + 47, + 180, + 301, + 226 + ], + "type": "text", + "content": "[765] Y. Li, Y. Du, K. Zhou, J. Wang, W. X. Zhao, and J.-R. Wen, \"Evaluating object hallucination in large vision-language models,\" arXiv preprint arXiv:2305.10355, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 227, + 301, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 227, + 301, + 274 + ], + "spans": [ + { + "bbox": [ + 47, + 227, + 301, + 274 + ], + "type": "text", + "content": "[766] C. Cui, Y. Zhou, X. Yang, S. Wu, L. Zhang, J. Zou, and H. Yao, “Holistic analysis of hallucination in gpt-4v (ision): Bias and interference challenges,” arXiv preprint arXiv:2311.03287, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 274, + 301, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 274, + 301, + 308 + ], + "spans": [ + { + "bbox": [ + 47, + 274, + 301, + 308 + ], + "type": "text", + "content": "[767] S. Wang, X. Ye, Q. Cheng, J. Duan, S. Li, J. Fu, X. Qiu, and X. Huang, \"Cross-modality safety alignment,\" arXiv preprint arXiv:2406.15279, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 308, + 301, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 308, + 301, + 365 + ], + "spans": [ + { + "bbox": [ + 47, + 308, + 301, + 365 + ], + "type": "text", + "content": "[768] A. Agarwal, S. Panda, A. Charles, B. Kumar, H. Patel, P. Pattnayak, T. H. Rafi, T. Kumar, and D.-K. Chae, \"Mvtamperbench: Evaluating robustness of vision-language models,\" arXiv preprint arXiv:2412.19794, 2024." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 365, + 301, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 365, + 301, + 423 + ], + "spans": [ + { + "bbox": [ + 47, + 365, + 301, + 423 + ], + "type": "text", + "content": "[769] H. Zhang, W. Shao, H. Liu, Y. Ma, P. Luo, Y. Qiao, and K. Zhang, \"Avibench: Towards evaluating the robustness of large vision-language model on adversarial visual-instructions,\" arXiv e-prints, pp. arXiv-2403, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 423, + 301, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 423, + 301, + 458 + ], + "spans": [ + { + "bbox": [ + 47, + 423, + 301, + 458 + ], + "type": "text", + "content": "[770] Z. Hu, Y. Ren, J. Li, and Y. Yin, \"Viva: A benchmark for vision-grounded decision-making with human values,\" arXiv preprint arXiv:2407.03000, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 458, + 301, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 458, + 301, + 515 + ], + "spans": [ + { + "bbox": [ + 47, + 458, + 301, + 515 + ], + "type": "text", + "content": "[771] Y. Xiao, A. Liu, Q. Cheng, Z. Yin, S. Liang, J. Li, J. Shao, X. Liu, and D. Tao, \"Genderbias- " + }, + { + "bbox": [ + 47, + 458, + 301, + 515 + ], + "type": "inline_equation", + "content": "\\cdot" + }, + { + "bbox": [ + 47, + 458, + 301, + 515 + ], + "type": "text", + "content": " emph {VL}: Benchmarking gender bias in vision language models via counterfactual probing,\" arXiv preprint arXiv:2407.00600, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 515, + 301, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 515, + 301, + 573 + ], + "spans": [ + { + "bbox": [ + 47, + 515, + 301, + 573 + ], + "type": "text", + "content": "[772] L. Gustafson, C. Rolland, N. Ravi, Q. Duval, A. Adcock, C.-Y. Fu, M. Hall, and C. 
Ross, \"Facet: Fairness in computer vision evaluation benchmark,\" in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2023, pp. 20370-20382." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 573, + 301, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 573, + 301, + 631 + ], + "spans": [ + { + "bbox": [ + 47, + 573, + 301, + 631 + ], + "type": "text", + "content": "[773] E. Slyman, S. Lee, S. Cohen, and K. Kafle, \"Fairdedup: Detecting and mitigating vision-language fairness disparities in semantic dataset dedduplication,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 13905-13916." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 631, + 301, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 631, + 301, + 689 + ], + "spans": [ + { + "bbox": [ + 47, + 631, + 301, + 689 + ], + "type": "text", + "content": "[774] Y. Zhang, J. Wang, and J. Sang, \"Counterfactually measuring and eliminating social bias in vision-language pre-training models,\" in Proceedings of the 30th ACM International Conference on Multimedia, 2022, pp. 4996-5004." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 689, + 301, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 301, + 734 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 301, + 734 + ], + "type": "text", + "content": "[775] K. C. Fraser and S. Kiritchenko, \"Examining gender and racial bias in large vision-language models using a novel dataset of parallel images,\" arXiv preprint arXiv:2402.05779, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 734, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 734, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 47, + 734, + 301, + 747 + ], + "type": "text", + "content": "[776] A. Seth, M. Hemani, and C. 
Agarwal, \"Dear: Debias" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 747 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 335, + 42, + 564, + 78 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 564, + 78 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 564, + 78 + ], + "type": "text", + "content": "ing vision-language models with additive residuals,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 6820-6829." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 78, + 564, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 78, + 564, + 123 + ], + "spans": [ + { + "bbox": [ + 310, + 78, + 564, + 123 + ], + "type": "text", + "content": "[777] S. Janghorbani and G. De Melo, \"Multimodal bias: Introducing a framework for stereotypical bias assessment beyond gender and race in vision language models,\" arXiv preprint arXiv:2303.12734, 2023." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 124, + 564, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 124, + 564, + 180 + ], + "spans": [ + { + "bbox": [ + 310, + 124, + 564, + 180 + ], + "type": "text", + "content": "[778] Y. Zhang, Y. Huang, Y. Sun, C. Liu, Z. Zhao, Z. Fang, Y. Wang, H. Chen, X. Yang, X. Wei et al., \"Benchmarking trustworthiness of multimodal large language models: A comprehensive study,\" arXiv preprint arXiv:2406.07057, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 180, + 564, + 237 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 180, + 564, + 237 + ], + "spans": [ + { + "bbox": [ + 310, + 180, + 564, + 237 + ], + "type": "text", + "content": "[779] Y. Zhang, L. Chen, G. Zheng, Y. Gao, R. Zheng, J. Fu, Z. Yin, S. Jin, Y. Qiao, X. 
Huang et al., \"Spa-vl: A comprehensive safety preference alignment dataset for vision language model,\" arXiv preprint arXiv:2406.12030, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 238, + 564, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 238, + 564, + 295 + ], + "spans": [ + { + "bbox": [ + 310, + 238, + 564, + 295 + ], + "type": "text", + "content": "[780] Z. Zhang, T. Kou, S. Wang, C. Li, W. Sun, W. Wang, X. Li, Z. Wang, X. Cao, X. Min et al., \"Q-eval-100k: Evaluating visual quality and alignment level for text-to-vision content,\" arXiv preprint arXiv:2503.02357, 2025." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 295, + 564, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 295, + 564, + 366 + ], + "spans": [ + { + "bbox": [ + 310, + 295, + 564, + 366 + ], + "type": "text", + "content": "[781] W. Wang, X. Liu, K. Gao, J.-T. Huang, Y. Yuan, P. He, S. Wang, and Z. Tu, \"Can't see the forest for the trees: Benchmarking multimodal safety awareness for multimodal llms,\" ArXiv, vol. abs/2502.11184, 2025. [Online]. Available: https://api.sementicscholar.org/CorpusID:276409442" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 366, + 564, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 366, + 564, + 435 + ], + "spans": [ + { + "bbox": [ + 310, + 366, + 564, + 435 + ], + "type": "text", + "content": "[782] W. Wang, K. Gao, Z. Jia, Y. Yuan, J.-T. Huang, Q. Liu, S. Wang, W. Jiao, and Z. Tu, \"Chain-of-jailbreak attack for image generation models via editing step by step,\" ArXiv, vol. abs/2410.03869, 2024. [Online]. 
Available: https://api_semanticscholar.org/ CorpusID:273186566" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 435, + 564, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 435, + 564, + 480 + ], + "spans": [ + { + "bbox": [ + 310, + 435, + 564, + 480 + ], + "type": "text", + "content": "[783] H. Naveed, A. U. Khan, S. Qiu, M. Saqib, S. Anwar, M. Usman, N. Akhtar, N. Barnes, and A. Mian, \"A comprehensive overview of large language models,\" arXiv preprint arXiv:2307.06435, 2023." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 481, + 564, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 481, + 564, + 538 + ], + "spans": [ + { + "bbox": [ + 310, + 481, + 564, + 538 + ], + "type": "text", + "content": "[784] W. Zhao, Y. Hu, Y. Deng, J. Guo, X. Sui, X. Han, A. Zhang, Y. Zhao, B. Qin, T.-S. Chua et al., \"Beware of your po! measuring and mitigating ai safety risks in role-play fine-tuning of llms,\" arXiv preprint arXiv:2502.20968, 2025." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 538, + 564, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 538, + 564, + 597 + ], + "spans": [ + { + "bbox": [ + 310, + 538, + 564, + 597 + ], + "type": "text", + "content": "[785] B. Liu, X. Li, J. Zhang, J. Wang, T. He, S. Hong, H. Liu, S. Zhang, K. Song, K. Zhu et al., \"Advances and challenges in foundation agents: From brain-inspired intelligence to evolutionary, collaborative, and safe systems,\" arXiv preprint arXiv:2504.01990, 2025." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 597, + 564, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 597, + 564, + 643 + ], + "spans": [ + { + "bbox": [ + 310, + 597, + 564, + 643 + ], + "type": "text", + "content": "[786] H. Jin, L. Huang, H. Cai, J. Yan, B. Li, and H. 
Chen, \"From llms to llm-based agents for software engineering: A survey of current, challenges and future,\" arXiv preprint arXiv:2408.02479, 2024." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 643, + 564, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 643, + 564, + 700 + ], + "spans": [ + { + "bbox": [ + 310, + 643, + 564, + 700 + ], + "type": "text", + "content": "[787] J. Piao, Y. Yan, J. Zhang, N. Li, J. Yan, X. Lan, Z. Lu, Z. Zheng, J. Y. Wang, D. Zhou et al., \"Agentsociety: Large-scale simulation of llm-driven generative agents advances understanding of human behaviors and society,\" arXiv preprint arXiv:2502.08691, 2025." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 700, + 564, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 700, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 310, + 700, + 564, + 747 + ], + "type": "text", + "content": "[788] Y. Yan, S. Wang, J. Huo, P. S. Yu, X. Hu, and Q. Wen, \"Mathagent: Leveraging a mixture-of-math-agent framework for real-world multimodal mathematical error detection,\" arXiv preprint arXiv:2503.18132, 2025." + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "63" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 62 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 42, + 301, + 99 + ], + "spans": [ + { + "bbox": [ + 47, + 42, + 301, + 99 + ], + "type": "text", + "content": "[789] H. Wang, A. Zhang, N. Duy Tai, J. Sun, T.-S. Chua et al., \"Ali-agent: Assessing llms' alignment with human values via agent-based evaluation,\" Advances in Neural Information Processing Systems, vol. 37, pp. 99040-99088, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 100, + 301, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 100, + 301, + 146 + ], + "spans": [ + { + "bbox": [ + 47, + 100, + 301, + 146 + ], + "type": "text", + "content": "[790] K. Zhang, J. Li, G. Li, X. Shi, and Z. Jin, \"Codeagent: Enhancing code generation with tool-integrated agent systems for real-world repo-level coding challenges,\" arXiv preprint arXiv:2401.07339, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 147, + 301, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 147, + 301, + 192 + ], + "spans": [ + { + "bbox": [ + 47, + 147, + 301, + 192 + ], + "type": "text", + "content": "[791] Y. Shen, K. Song, X. Tan, D. Li, W. Lu, and Y. Zhuang, \"Hugginggpt: Solving ai tasks with chatgpt and its friends in hugging face,\" Advances in Neural Information Processing Systems, vol. 36, pp. 38154-38180, 2023." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 193, + 301, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 193, + 301, + 238 + ], + "spans": [ + { + "bbox": [ + 47, + 193, + 301, + 238 + ], + "type": "text", + "content": "[792] Z. Chu, S. Wang, J. Xie, T. Zhu, Y. Yan, J. Ye, A. Zhong, X. Hu, J. Liang, P. S. Yu et al., \"Llm agents for education: Advances and applications,\" arXiv preprint arXiv:2503.11733, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 239, + 301, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 239, + 301, + 283 + ], + "spans": [ + { + "bbox": [ + 47, + 239, + 301, + 283 + ], + "type": "text", + "content": "[793] W. Zhang, Y. Shen, W. Lu, and Y. Zhuang, \"Data-copilot: Bridging billions of data and humans with autonomous workflow,\" arXiv preprint arXiv:2306.07209, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 285, + 301, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 285, + 301, + 319 + ], + "spans": [ + { + "bbox": [ + 47, + 285, + 301, + 319 + ], + "type": "text", + "content": "[794] W. Xu, Z. Liang, K. Mei, H. Gao, J. Tan, and Y. Zhang, \"A-mem: Agentic memory for llm agents,\" arXiv preprint arXiv:2502.12110, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 320, + 301, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 320, + 301, + 354 + ], + "spans": [ + { + "bbox": [ + 47, + 320, + 301, + 354 + ], + "type": "text", + "content": "[795] Y. Shang, Y. Li, K. Zhao, L. Ma, J. Liu, F. Xu, and Y. Li, \"Agentsquare: Automatic llm agent search in modular design space,\" arXiv preprint arXiv:2410.06153, 2024." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 354, + 301, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 354, + 301, + 412 + ], + "spans": [ + { + "bbox": [ + 47, + 354, + 301, + 412 + ], + "type": "text", + "content": "[796] J. Yang, C. Jimenez, A. Wettig, K. Lieret, S. Yao, K. Narasimhan, and O. Press, \"Swe-agent: Agent-computer interfaces enable automated software engineering,\" Advances in Neural Information Processing Systems, vol. 37, pp. 50528-50652, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 413, + 301, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 413, + 301, + 458 + ], + "spans": [ + { + "bbox": [ + 47, + 413, + 301, + 458 + ], + "type": "text", + "content": "[797] S. Agashe, J. Han, S. Gan, J. Yang, A. Li, and X. E. Wang, \"Agent s: An open agentic framework that uses computers like a human,\" arXiv preprint arXiv:2410.08164, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 459, + 301, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 459, + 301, + 503 + ], + "spans": [ + { + "bbox": [ + 47, + 459, + 301, + 503 + ], + "type": "text", + "content": "[798] S. Hao, Y. Gu, H. Ma, J. J. Hong, Z. Wang, D. Z. Wang, and Z. Hu, \"Reasoning with language model is planning with world model,\" arXiv preprint arXiv:2305.14992, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 504, + 301, + 549 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 504, + 301, + 549 + ], + "spans": [ + { + "bbox": [ + 47, + 504, + 301, + 549 + ], + "type": "text", + "content": "[799] J. Hong, J. Lin, A. Dragan, and S. Levine, \"Interactive dialogue agents via reinforcement learning on hindsight regenerations,\" arXiv preprint arXiv:2411.05194, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 550, + 301, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 550, + 301, + 585 + ], + "spans": [ + { + "bbox": [ + 47, + 550, + 301, + 585 + ], + "type": "text", + "content": "[800] J. Tang, T. Fan, and C. Huang, \"Autoagent: A fully-automated and zero-code framework for llm agents,\" arXiv e-prints, pp. arXiv-2502, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 586, + 301, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 586, + 301, + 643 + ], + "spans": [ + { + "bbox": [ + 47, + 586, + 301, + 643 + ], + "type": "text", + "content": "[801] G. Li, H. Hammoud, H. Itani, D. Khizbullin, and B. Ghanem, \"Camel: Communicative agents for\" mind\" exploration of large language model society,\" Advances in Neural Information Processing Systems, vol. 36, pp. 51991-52008, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 643, + 301, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 643, + 301, + 688 + ], + "spans": [ + { + "bbox": [ + 47, + 643, + 301, + 688 + ], + "type": "text", + "content": "[802] S. Yuan, K. Song, J. Chen, X. Tan, D. Li, and D. Yang, \"Evoagent: Towards automatic multi-agent generation via evolutionary algorithms,\" arXiv preprint arXiv:2406.14228, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 689, + 301, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 689, + 301, + 734 + ], + "spans": [ + { + "bbox": [ + 47, + 689, + 301, + 734 + ], + "type": "text", + "content": "[803] M. Zhuge, W. Wang, L. Kirsch, F. Faccio, D. Khizbullin, and J. Schmidhuber, \"Language agents as estimizable graphs,\" arXiv preprint arXiv:2402.16823, 2024." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 734, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 734, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 734, + 301, + 746 + ], + "type": "text", + "content": "[804] Y. Wang, T. Shen, L. Liu, and J. Xie, \"Sibyl: Simple" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 36, + "blocks": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "type": "text", + "content": "yet effective agent framework for complex real-world reasoning,\" arXiv preprint arXiv:2407.10718, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 66, + 564, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 66, + 564, + 112 + ], + "spans": [ + { + "bbox": [ + 310, + 66, + 564, + 112 + ], + "type": "text", + "content": "[805] Z. Wang, X. Zeng, W. Liu, L. Li, Y. Wang, L. Shang, X. Jiang, Q. Liu, and K.-F. Wong, \"Toolflow: Boosting llm tool-calling through natural and coherent dialogue synthesis,\" arXiv preprint arXiv:2410.18447, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 113, + 564, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 113, + 564, + 145 + ], + "spans": [ + { + "bbox": [ + 310, + 113, + 564, + 145 + ], + "type": "text", + "content": "[806] F. Wu, S. Wu, Y. Cao, and C. Xiao, \"Wipi: A new web threat for llm-driven web agents,\" arXiv preprint arXiv:2402.16965, 2024." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 147, + 564, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 147, + 564, + 204 + ], + "spans": [ + { + "bbox": [ + 310, + 147, + 564, + 204 + ], + "type": "text", + "content": "[807] S. S. Kannan, V. L. Venkatesh, and B.-C. Min, \"Smartllm: Smart multi-agent robot task planning using large language models,\" in 2024 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2024, pp. 12140-12147." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 205, + 564, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 205, + 564, + 249 + ], + "spans": [ + { + "bbox": [ + 310, + 205, + 564, + 249 + ], + "type": "text", + "content": "[808] R. Fang, R. Bindu, A. Gupta, and D. Kang, \"Llm agents can autonomously exploit one-day vulnerabilities,\" arXiv preprint arXiv:2404.08144, vol. 13, p. 14, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 251, + 564, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 251, + 564, + 285 + ], + "spans": [ + { + "bbox": [ + 310, + 251, + 564, + 285 + ], + "type": "text", + "content": "[809] R. Fang, R. Bindu, A. Gupta, Q. Zhan, and D. Kang, \"Llm agents can autonomously hack websites,\" arXiv preprint arXiv:2402.06664, 2024." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 285, + 564, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 285, + 564, + 319 + ], + "spans": [ + { + "bbox": [ + 310, + 285, + 564, + 319 + ], + "type": "text", + "content": "[810] W. Cheng, K. Sun, X. Zhang, and W. Wang, \"Security attacks on llm-based code completion tools,\" arXiv preprint arXiv:2408.11006, 2024." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 320, + 564, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 320, + 564, + 365 + ], + "spans": [ + { + "bbox": [ + 310, + 320, + 564, + 365 + ], + "type": "text", + "content": "[811] X. Fu, Z. Wang, S. Li, R. K. Gupta, N. Mireshghallah, T. Berg-Kirkpatrick, and E. Fernandes, \"Misusing tools in large language models with visual adversarial examples,\" arXiv preprint arXiv:2310.03185, 2023." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 366, + 564, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 366, + 564, + 411 + ], + "spans": [ + { + "bbox": [ + 310, + 366, + 564, + 411 + ], + "type": "text", + "content": "[812] X. Fu, S. Li, Z. Wang, Y. Liu, R. K. Gupta, T. Berg-Kirkpatrick, and E. Fernandes, \"Imprompter: Tricking llm agents into improper tool use,\" arXiv preprint arXiv:2410.14923, 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 412, + 564, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 412, + 564, + 458 + ], + "spans": [ + { + "bbox": [ + 310, + 412, + 564, + 458 + ], + "type": "text", + "content": "[813] B. Zhang, Y. Tan, Y. Shen, A. Salem, M. Backes, S. Zannettou, and Y. Zhang, \"Breaking agents: Compromising autonomous llm agents through malfunction amplification,\" arXiv preprint arXiv:2407.20859, 2024." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 459, + 564, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 459, + 564, + 504 + ], + "spans": [ + { + "bbox": [ + 310, + 459, + 564, + 504 + ], + "type": "text", + "content": "[814] H. Wang, R. Zhang, J. Wang, M. Li, Y. Huang, D. Wang, and Q. Wang, \"From allies to adversaries: Manipulating llm tool-calling through adversarial injection,\" arXiv preprint arXiv:2412.10198, 2024." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 505, + 564, + 560 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 505, + 564, + 560 + ], + "spans": [ + { + "bbox": [ + 310, + 505, + 564, + 560 + ], + "type": "text", + "content": "[815] W. Yang, X. Bi, Y. Lin, S. Chen, J. Zhou, and X. Sun, \"Watch out for your agents! investigating backdoor threats to lvm-based agents,\" Advances in Neural Information Processing Systems, vol. 37, pp. 100938-100964, 2024." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 562, + 564, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 562, + 564, + 608 + ], + "spans": [ + { + "bbox": [ + 310, + 562, + 564, + 608 + ], + "type": "text", + "content": "[816] P. Zhu, Z. Zhou, Y. Zhang, S. Yan, K. Wang, and S. Su, \"Demonagent: Dynamically encrypted multi-backdoor implantation attack on llm-based agent,\" arXiv preprint arXiv:2502.12575, 2025." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 609, + 564, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 609, + 564, + 643 + ], + "spans": [ + { + "bbox": [ + 310, + 609, + 564, + 643 + ], + "type": "text", + "content": "[817] Y. Wang, D. Xue, S. Zhang, and S. Qian, \"Badagent: Inserting and activating backdoor attacks in llm agents,\" arXiv preprint arXiv:2406.03007, 2024." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 643, + 564, + 699 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 643, + 564, + 699 + ], + "spans": [ + { + "bbox": [ + 310, + 643, + 564, + 699 + ], + "type": "text", + "content": "[818] Z. Jiang, M. Li, G. Yang, J. Wang, Y. Huang, Z. Chang, and Q. Wang, \"Mimicking the familiar: Dynamic command generation for information theft attacks in llm tool-learning system,\" arXiv preprint arXiv:2502.11358, 2025." 
+ } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 700, + 564, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 700, + 564, + 734 + ], + "spans": [ + { + "bbox": [ + 310, + 700, + 564, + 734 + ], + "type": "text", + "content": "[819] W. Zhao, V. Khazanchi, H. Xing, X. He, Q. Xu, and N. D. Lane, \"Attacks on third-party apis of large language models,\" arXiv preprint arXiv:2404.16891, 2024." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 310, + 735, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 735, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 735, + 564, + 746 + ], + "type": "text", + "content": "[820] J. Chen and S. L. Cong, \"Agentguard: Repurposing" + } + ] + } + ], + "index": 35 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "64" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 63 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "type": "text", + "content": "agentric orchestrator for safety evaluation of tool orchestration,\" arXiv preprint arXiv:2502.09809, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 66, + 301, + 122 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 66, + 301, + 122 + ], + "spans": [ + { + "bbox": [ + 47, + 66, + 301, + 122 + ], + "type": "text", + "content": "[821] X. Zhang, H. Xu, Z. Ba, Z. Wang, Y. Hong, J. Liu, Z. Qin, and K. Ren, \"Privacyasst: Safeguarding user privacy in tool-using large language model agents,\" IEEE Transactions on Dependable and Secure Computing, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 124, + 301, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 124, + 301, + 179 + ], + "spans": [ + { + "bbox": [ + 47, + 124, + 301, + 179 + ], + "type": "text", + "content": "[822] Z. Xiang, L. Zheng, Y. Li, J. Hong, Q. Li, H. Xie, J. Zhang, Z. Xiong, C. Xie, C. Yang et al., \"Guardagent: Safeguard llm agents by a guard agent via knowledge-enabled reasoning,\" arXiv preprint arXiv:2406.09187, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 181, + 301, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 181, + 301, + 227 + ], + "spans": [ + { + "bbox": [ + 47, + 181, + 301, + 227 + ], + "type": "text", + "content": "[823] Y. Gao, Y. Xiong, X. Gao, K. Jia, J. Pan, Y. Bi, Y. Dai, J. Sun, H. Wang, and H. Wang, \"Retrieval-augmented generation for large language models: A survey,\" arXiv preprint arXiv:2312.10997, vol. 2, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 228, + 301, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 228, + 301, + 274 + ], + "spans": [ + { + "bbox": [ + 47, + 228, + 301, + 274 + ], + "type": "text", + "content": "[824] P. Zhao, H. Zhang, Q. Yu, Z. Wang, Y. Geng, F. Fu, L. Yang, W. Zhang, J. Jiang, and B. Cui, \"Retrievalaugmented generation for ai-generated content: A survey,\" arXiv preprint arXiv:2402.19473, 2024." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 274, + 301, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 274, + 301, + 308 + ], + "spans": [ + { + "bbox": [ + 47, + 274, + 301, + 308 + ], + "type": "text", + "content": "[825] C. Xiang, T. Wu, Z. Zhong, D. Wagner, D. Chen, and P. Mittal, \"Certifiably robust rag against retrieval corruption,\" arXiv preprint arXiv:2405.15556, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 308, + 301, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 308, + 301, + 365 + ], + "spans": [ + { + "bbox": [ + 47, + 308, + 301, + 365 + ], + "type": "text", + "content": "[826] Z. Chen, Z. Xiang, C. Xiao, D. Song, and B. Li, \"Agentpoison: Red-teaming llm agents via poisoning memory or knowledge bases,\" Advances in Neural Information Processing Systems, vol. 37, pp. 130-185-130-213, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 365, + 301, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 365, + 301, + 411 + ], + "spans": [ + { + "bbox": [ + 47, + 365, + 301, + 411 + ], + "type": "text", + "content": "[827] W. Zou, R. Geng, B. Wang, and J. Jia, \"Poisonedrag: Knowledge corruption attacks to retrieval-augmented generation of large language models,\" arXiv preprint arXiv:2402.07867, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 412, + 301, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 412, + 301, + 446 + ], + "spans": [ + { + "bbox": [ + 47, + 412, + 301, + 446 + ], + "type": "text", + "content": "[828] Z. Zhong, Z. Huang, A. Wettig, and D. Chen, \"Poisoning retrieval corpora by injecting adversarial passages,\" arXiv preprint arXiv:2310.19156, 2023." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 447, + 301, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 447, + 301, + 492 + ], + "spans": [ + { + "bbox": [ + 47, + 447, + 301, + 492 + ], + "type": "text", + "content": "[829] X. Gu, X. Zheng, T. Pang, C. Du, Q. Liu, Y. Wang, J. Jiang, and M. Lin, \"Agent smith: A single image can jailbreak one million multimodal llm agents exponentially fast,\" arXiv preprint arXiv:2402.08567, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 493, + 301, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 493, + 301, + 538 + ], + "spans": [ + { + "bbox": [ + 47, + 493, + 301, + 538 + ], + "type": "text", + "content": "[830] A. Li, Y. Zhou, V. C. Raghuram, T. Goldstein, and M. Goldblum, \"Commercial llm agents are already vulnerable to simple yet dangerous attacks,\" arXiv preprint arXiv:2502.08586, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 539, + 301, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 539, + 301, + 584 + ], + "spans": [ + { + "bbox": [ + 47, + 539, + 301, + 584 + ], + "type": "text", + "content": "[831] H. Li, M. Xu, and Y. Song, \"Sentence embedding leaks more information than you expect: Generative embedding inversion attack to recover the whole sentence,\" arXiv preprint arXiv:2305.03010, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 585, + 301, + 630 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 585, + 301, + 630 + ], + "spans": [ + { + "bbox": [ + 47, + 585, + 301, + 630 + ], + "type": "text", + "content": "[832] M. Russinovich, A. Salem, and R. Eldan, \"Great, now write an article about that: The crescendo multi-turn llm jailbreak attack,\" arXiv preprint arXiv:2404.01833, 2024." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 632, + 301, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 632, + 301, + 677 + ], + "spans": [ + { + "bbox": [ + 47, + 632, + 301, + 677 + ], + "type": "text", + "content": "[833] Y. Cheng, M. Georgopoulos, V. Cevher, and G. G. Chrysos, \"Leveraging the context through multiround interactions for jailbreaking attacks,\" arXiv preprint arXiv:2402.09177, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 677, + 301, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 301, + 734 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 301, + 734 + ], + "type": "text", + "content": "[834] A. Priyanshu and S. Vijay, \"Fractured-sorry-bench: Framework for revealing attacks in conversational turns undermining refusal efficacy and defenses over sorry-bench (automated multi-shot jailbreaks),\" arXiv preprint arXiv:2408.16163, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 735, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 735, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 735, + 301, + 746 + ], + "type": "text", + "content": "[835] D. Agarwal, A. R. Fabbri, B. Risher, P. Laban," + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 335, + 42, + 564, + 77 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 564, + 77 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 564, + 77 + ], + "type": "text", + "content": "S. Joty, and C.-S. Wu, \"Prompt leakage effect and defense strategies for multi-turn llm interactions,\" arXiv preprint arXiv:2404.16251, 2024." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 78, + 564, + 121 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 78, + 564, + 121 + ], + "spans": [ + { + "bbox": [ + 310, + 78, + 564, + 121 + ], + "type": "text", + "content": "[836] T. Tong, J. Xu, Q. Liu, and M. Chen, \"Securing multi-turn conversational language models from distributed backdoor triggers,\" arXiv preprint arXiv:2407.04151, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 122, + 564, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 122, + 564, + 180 + ], + "spans": [ + { + "bbox": [ + 310, + 122, + 564, + 180 + ], + "type": "text", + "content": "[837] J. Mao, F. Meng, Y. Duan, M. Yu, X. Jia, J. Fang, Y. Liang, K. Wang, and Q. Wen, \"Agentsafe: Safeguarding large language model-based multi-agent systems via hierarchical data management,\" arXiv preprint arXiv:2503.04392, 2025." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 181, + 564, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 181, + 564, + 216 + ], + "spans": [ + { + "bbox": [ + 310, + 181, + 564, + 216 + ], + "type": "text", + "content": "[838] H. Zhou, K.-H. Lee, Z. Zhan, Y. Chen, and Z. Li, \"Trustrag: Enhancing robustness and trustworthiness in rag,\" arXiv preprint arXiv:2501.00879, 2025." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 216, + 564, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 216, + 564, + 274 + ], + "spans": [ + { + "bbox": [ + 310, + 216, + 564, + 274 + ], + "type": "text", + "content": "[839] X. Xian, G. Wang, X. Bi, J. Srinivasa, A. Kundu, C. Fleming, M. Hong, and J. Ding, \"On the vulnerability of applying retrieval-augmented generation within knowledge-intensive application domains,\" arXiv preprint arXiv:2409.17275, 2024." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 274, + 564, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 274, + 564, + 331 + ], + "spans": [ + { + "bbox": [ + 310, + 274, + 564, + 331 + ], + "type": "text", + "content": "[840] B. Chen, G. Wang, H. Guo, Y. Wang, and Q. Yan, \"Understanding multi-turn toxic behaviors in open-domain chatbots,\" in Proceedings of the 26th International Symposium on Research in Attacks, Intrusions and Defenses, 2023, pp. 282-296." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 331, + 564, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 331, + 564, + 376 + ], + "spans": [ + { + "bbox": [ + 310, + 331, + 564, + 376 + ], + "type": "text", + "content": "[841] R. Song, M. O. Ozmen, H. Kim, A. Bianchi, and Z. B. Celik, \"Enhancing llm-based autonomous driving agents to mitigate perception attacks,\" arXiv preprint arXiv:2409.14488, 2024." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 376, + 564, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 376, + 564, + 423 + ], + "spans": [ + { + "bbox": [ + 310, + 376, + 564, + 423 + ], + "type": "text", + "content": "[842] C. H. Low, Z. Wang, T. Zhang, Z. Zeng, Z. Zhuo, E. B. Mazomenos, and Y. Jin, \"Surgraw: Multi-agent workflow with chain-of-thought reasoning for surgical intelligence,\" arXiv preprint arXiv:2503.10265, 2025." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 423, + 564, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 423, + 564, + 468 + ], + "spans": [ + { + "bbox": [ + 310, + 423, + 564, + 468 + ], + "type": "text", + "content": "[843] Z. Wang, J. Wu, C. H. Low, and Y. Jin, \"Medagent-pro: Towards multi-modal evidence-based medical diagnosis via reasoning agentic workflow,\" arXiv preprint arXiv:2503.18968, 2025." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 469, + 564, + 527 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 469, + 564, + 527 + ], + "spans": [ + { + "bbox": [ + 310, + 469, + 564, + 527 + ], + "type": "text", + "content": "[844] K. N. Jeptoo and C. Sun, \"Enhancing fake news detection with large language models through multi-agent debates,\" in CCF International Conference on Natural Language Processing and Chinese Computing. Springer, 2024, pp. 474-486." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 527, + 564, + 561 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 527, + 564, + 561 + ], + "spans": [ + { + "bbox": [ + 310, + 527, + 564, + 561 + ], + "type": "text", + "content": "[845] T. Park, \"Enhancing anomaly detection in financial markets with an llm-based multi-agent framework,\" arXiv preprint arXiv:2403.19735, 2024." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 562, + 564, + 618 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 562, + 564, + 618 + ], + "spans": [ + { + "bbox": [ + 310, + 562, + 564, + 618 + ], + "type": "text", + "content": "[846] Z. Yang, S. S. Raman, A. Shah, and S. Tellex, \"Plug in the safety chip: Enforcing constraints for llm-driven robot agents,\" in 2024 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2024, pp. 14435-14442." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 619, + 564, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 619, + 564, + 677 + ], + "spans": [ + { + "bbox": [ + 310, + 619, + 564, + 677 + ], + "type": "text", + "content": "[847] J. Zhang, C. Xu, and B. Li, \"Chatscene: Knowledge-enabled safety-critical scenario generation for autonomous vehicles,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 15459-15469." 
+ } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 677, + 564, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 677, + 564, + 723 + ], + "spans": [ + { + "bbox": [ + 310, + 677, + 564, + 723 + ], + "type": "text", + "content": "[848] T. Abuelsaad, D. Akkil, P. Dey, A. Jagmohan, A. Vempaty, and R. Kokku, \"Agent-e: From autonomous web navigation to foundational design principles in agenti-tic systems,\" arXiv preprint arXiv:2407.13032, 2024." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 724, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 724, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 724, + 564, + 746 + ], + "type": "text", + "content": "[849] E. Debenedetti, J. Zhang, M. Balunović, L. Beurer-Kellner, M. Fischer, and F. Tramère, \"Agentdojo: A dy" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "65" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 64 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 42, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 299, + 65 + ], + "type": "text", + "content": "namic environment to evaluate attacks and defenses for llm agents,\" arXiv preprint arXiv:2406.13352, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 66, + 301, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 66, + 301, + 123 + ], + "spans": [ + { + "bbox": [ + 46, + 66, + 301, + 123 + ], + "type": "text", + "content": "[850] Y. Sun, N. Salami Pargoo, P. Jin, and J. Ortiz, \"Optimizing autonomous driving for safety: A human-centric approach with lvm-enhanced rlhf,\" in Companion of the 2024 on ACM International Joint Conference on Pervasive and Ubiquitous Computing, 2024, pp. 76-80." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 124, + 301, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 124, + 301, + 167 + ], + "spans": [ + { + "bbox": [ + 47, + 124, + 301, + 167 + ], + "type": "text", + "content": "[851] R. Fang, R. Bindu, A. Gupta, and D. Kang, \"Llm agents can autonomously exploit one-day vulnerabilities,\" arXiv preprint arXiv:2404.08144, vol. 13, p. 14, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 170, + 301, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 170, + 301, + 227 + ], + "spans": [ + { + "bbox": [ + 47, + 170, + 301, + 227 + ], + "type": "text", + "content": "[852] Y. H. Ke, R. Yang, S. A. Lie, T. X. Y. Lim, H. R. Abdullah, D. S. W. Ting, and N. Liu, \"Enhancing diagnostic accuracy through multi-agent conversations: using large language models to mitigate cognitive bias,\" arXiv preprint arXiv:2401.14589, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 228, + 301, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 228, + 301, + 272 + ], + "spans": [ + { + "bbox": [ + 47, + 228, + 301, + 272 + ], + "type": "text", + "content": "[853] X. Mou, Z. Wei, and X. Huang, \"Unveiling the truth and facilitating change: Towards agent-based largescale social movement simulation,\" arXiv preprint arXiv:2402.16333, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 274, + 301, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 274, + 301, + 319 + ], + "spans": [ + { + "bbox": [ + 47, + 274, + 301, + 319 + ], + "type": "text", + "content": "[854] Z. Chen, J. Chen, J. Chen, and M. Sra, \"Position: Standard benchmarks fail-llm agents present overlooked risks for financial applications,\" arXiv preprint arXiv:2502.15865, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 320, + 301, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 320, + 301, + 376 + ], + "spans": [ + { + "bbox": [ + 47, + 320, + 301, + 376 + ], + "type": "text", + "content": "[855] Z. Liu, R. Zeng, D. Wang, G. Peng, J. Wang, Q. Liu, P. Liu, and W. Wang, \"Agents4plc: Automating closed-loop plc code generation and verification in industrial control systems using llm-based agents,\" arXiv preprint arXiv:2410.14209, 2024." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 377, + 301, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 377, + 301, + 434 + ], + "spans": [ + { + "bbox": [ + 47, + 377, + 301, + 434 + ], + "type": "text", + "content": "[856] S. Mukherjee, P. Gamble, M. S. Ausin, N. Kant, K. Aggarwal, N. Manjunath, D. Datta, Z. Liu, J. Ding, S. Busacca et al., \"Polaris: A safety-focused llm constellation architecture for healthcare,\" arXiv preprint arXiv:2403.13313, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 435, + 301, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 435, + 301, + 479 + ], + "spans": [ + { + "bbox": [ + 47, + 435, + 301, + 479 + ], + "type": "text", + "content": "[857] L. La Cava and A. Tagarelli, \"Safeguarding decentralized social media: Llm agents for automating community rule compliance,\" arXiv preprint arXiv:2409.08963, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 481, + 301, + 527 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 481, + 301, + 527 + ], + "spans": [ + { + "bbox": [ + 47, + 481, + 301, + 527 + ], + "type": "text", + "content": "[858] Y. Gan, Y. Yang, Z. Ma, P. He, R. Zeng, Y. Wang, Q. Li, C. Zhou, S. Li, T. Wang et al., \"Navigating the risks: A survey of security, privacy, and ethics threats in lmbased agents,\" arXiv preprint arXiv:2411.09523, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 528, + 301, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 528, + 301, + 573 + ], + "spans": [ + { + "bbox": [ + 47, + 528, + 301, + 573 + ], + "type": "text", + "content": "[859] Z. Deng, Y. Guo, C. Han, W. Ma, J. Xiong, S. Wen, and Y. Xiang, \"Ai agents under threat: A survey of key security challenges and future pathways,\" ACM Computing Surveys, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 574, + 301, + 618 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 574, + 301, + 618 + ], + "spans": [ + { + "bbox": [ + 47, + 574, + 301, + 618 + ], + "type": "text", + "content": "[860] R. Ye, S. Tang, R. Ge, Y. Du, Z. Yin, S. Chen, and J. Shao, \"Mas-gpt: Training llms to build llm-based multi-agent systems,\" arXiv preprint arXiv:2503.03686, 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 620, + 301, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 620, + 301, + 666 + ], + "spans": [ + { + "bbox": [ + 47, + 620, + 301, + 666 + ], + "type": "text", + "content": "[861] J. Zhang, J. Xiang, Z. Yu, F. Teng, X. Chen, J. Chen, M. Zhuge, X. Cheng, S. Hong, J. Wang et al., \"Aflow: Automating agentic workflow generation,\" arXiv preprint arXiv:2410.10762, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 666, + 301, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 666, + 301, + 700 + ], + "spans": [ + { + "bbox": [ + 47, + 666, + 301, + 700 + ], + "type": "text", + "content": "[862] L. Panait and S. Luke, \"Cooperative multi-agent learning: The state of the art,\" Autonomous agents and multiagent systems, vol. 11, pp. 387-434, 2005." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 701, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 701, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 701, + 301, + 746 + ], + "type": "text", + "content": "[863] L. Hammond, A. Chan, J. Clifton, J. Hoelscher-Obermaier, A. Khan, E. McLean, C. Smith, W. Barfuss, J. Foerster, T. Gavencciak et al., \"Multi-agent risks from advanced ai,\" arXiv preprint arXiv:2502.14143, 2025." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 310, + 42, + 564, + 87 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 42, + 564, + 87 + ], + "spans": [ + { + "bbox": [ + 310, + 42, + 564, + 87 + ], + "type": "text", + "content": "[864] R. Xu, X. Li, S. Chen, and W. Xu, \"Nuclear deployed: Analyzing catastrophic risks in decision-making of autonomous llm agents,\" arXiv preprint arXiv:2502.11355, 2025." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 89, + 564, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 89, + 564, + 134 + ], + "spans": [ + { + "bbox": [ + 310, + 89, + 564, + 134 + ], + "type": "text", + "content": "[865] Z. Zhou, Z. Li, J. Zhang, Y. Zhang, K. Wang, Y. Liu, and Q. Guo, \"Corba: Contagious recursive blocking attacks on multi-agent systems based on large language models,\" arXiv preprint arXiv:2502.14529, 2025." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 135, + 564, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 135, + 564, + 180 + ], + "spans": [ + { + "bbox": [ + 310, + 135, + 564, + 180 + ], + "type": "text", + "content": "[866] Z. Tan, C. Zhao, R. Moraffah, Y. Li, Y. Kong, T. Chen, and H. Liu, \"The wolf within: Covert injection of malice into mllm societies via an mllm operative,\" arXiv preprint arXiv:2402.14859, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 181, + 564, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 181, + 564, + 227 + ], + "spans": [ + { + "bbox": [ + 310, + 181, + 564, + 227 + ], + "type": "text", + "content": "[867] M. Yu, S. Wang, G. Zhang, J. Mao, C. Yin, Q. Liu, Q. Wen, K. Wang, and Y. 
Wang, \"Netsafe: Exploring the topological safety of multi-agent networks,\" arXiv preprint arXiv:2410.15686, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 228, + 564, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 228, + 564, + 273 + ], + "spans": [ + { + "bbox": [ + 310, + 228, + 564, + 273 + ], + "type": "text", + "content": "[868] J.-t. Huang, J. Zhou, T. Jin, X. Zhou, Z. Chen, W. Wang, Y. Yuan, M. Sap, and M. R. Lyu, \"On the resilience of multi-agent systems with malicious agents,\" arXiv preprint arXiv:2408.00989, 2024." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 274, + 564, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 274, + 564, + 308 + ], + "spans": [ + { + "bbox": [ + 310, + 274, + 564, + 308 + ], + "type": "text", + "content": "[869] P. He, Y. Lin, S. Dong, H. Xu, Y. Xing, and H. Liu, \"Red-teaming llm multi-agent systems via communication attacks,\" arXiv preprint arXiv:2502.14847, 2025." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 308, + 564, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 308, + 564, + 342 + ], + "spans": [ + { + "bbox": [ + 310, + 308, + 564, + 342 + ], + "type": "text", + "content": "[870] Y. Tian, X. Yang, J. Zhang, Y. Dong, and H. Su, \"Evil geniuses: Delving into the safety of llm-based agents,\" arXiv preprint arXiv:2311.11855, 2023." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 343, + 564, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 343, + 564, + 399 + ], + "spans": [ + { + "bbox": [ + 310, + 343, + 564, + 399 + ], + "type": "text", + "content": "[871] A. Amayuelas, X. Yang, A. Antoniades, W. Hua, L. Pan, and W. Wang, \"Multiagent collaboration attack: Investigating adversarial attacks in large language model collaborations via debate,\" arXiv preprint arXiv:2406.14711, 2024." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 400, + 564, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 400, + 564, + 446 + ], + "spans": [ + { + "bbox": [ + 310, + 400, + 564, + 446 + ], + "type": "text", + "content": "[872] T. Ju, Y. Wang, X. Ma, P. Cheng, H. Zhao, Y. Wang, L. Liu, J. Xie, Z. Zhang, and G. Liu, \"Flooding spread of manipulated knowledge in llm-based multi-agent communities,\" arXiv preprint arXiv:2407.07791, 2024." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 447, + 564, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 447, + 564, + 480 + ], + "spans": [ + { + "bbox": [ + 310, + 447, + 564, + 480 + ], + "type": "text", + "content": "[873] G. Lin and Q. Zhao, \"Large language model sentinel: Llm agent for adversarial purification,\" arXiv preprint arXiv:2405.20770, 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 481, + 564, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 481, + 564, + 515 + ], + "spans": [ + { + "bbox": [ + 310, + 481, + 564, + 515 + ], + "type": "text", + "content": "[874] Y. Zeng, Y. Wu, X. Zhang, H. Wang, and Q. Wu, \"Autodefense: Multi-agent llm defense against jailbreak attacks,\" arXiv preprint arXiv:2403.04783, 2024." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 516, + 564, + 549 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 516, + 564, + 549 + ], + "spans": [ + { + "bbox": [ + 310, + 516, + 564, + 549 + ], + "type": "text", + "content": "[875] S. Chern, Z. Fan, and A. Liu, \"Combating adversarial attacks with multi-agent debate,\" arXiv preprint arXiv:2401.05998, 2024." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 550, + 564, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 550, + 564, + 608 + ], + "spans": [ + { + "bbox": [ + 310, + 550, + 564, + 608 + ], + "type": "text", + "content": "[876] B. Chen, G. Li, X. Lin, Z. Wang, and J. Li, \"Blockagents: Towards byzantine-robust llm-based multi-agent coordination via blockchain,\" in Proceedings of the ACM Turing Award Celebration Conference-China 2024, 2024, pp. 187-192." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 609, + 564, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 609, + 564, + 653 + ], + "spans": [ + { + "bbox": [ + 310, + 609, + 564, + 653 + ], + "type": "text", + "content": "[877] C. Song, L. Ma, J. Zheng, J. Liao, H. Kuang, and L. Yang, \"Audit-llm: Multi-agent collaboration for log-based insider threat detection,\" arXiv preprint arXiv:2408.08902, 2024." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 654, + 564, + 710 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 654, + 564, + 710 + ], + "spans": [ + { + "bbox": [ + 310, + 654, + 564, + 710 + ], + "type": "text", + "content": "[878] S. Wang, G. Zhang, M. Yu, G. Wan, F. Meng, C. Guo, K. Wang, and Y. Wang, \"G-safeguard: A topology-guided security lens and treatment on llm-based multi-agent systems,\" arXiv preprint arXiv:2502.11127, 2025." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 712, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 712, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 712, + 564, + 746 + ], + "type": "text", + "content": "[879] Z. Wu, S. Pan, F. Chen, G. Long, C. Zhang, and S. Y. 
Philip, \"A comprehensive survey on graph neural networks,\" IEEE transactions on neural networks and" + } + ] + } + ], + "index": 33 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "66" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 65 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 71, + 42, + 263, + 54 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 263, + 54 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 263, + 54 + ], + "type": "text", + "content": "learning systems, vol. 32, no. 1, pp. 4-24, 2020." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 54, + 301, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 54, + 301, + 99 + ], + "spans": [ + { + "bbox": [ + 47, + 54, + 301, + 99 + ], + "type": "text", + "content": "[880] X. Zheng, Y. Wang, Y. Liu, M. Li, M. Zhang, D. Jin, P. S. Yu, and S. Pan, \"Graph neural networks for graphs with heterophily: A survey,\" arXiv preprint arXiv:2202.07082, 2022." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 100, + 301, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 100, + 301, + 157 + ], + "spans": [ + { + "bbox": [ + 47, + 100, + 301, + 157 + ], + "type": "text", + "content": "[881] M. R. 
Genesereth and S. P. Ketchpel, \"The kqml protocol: A specification of language and communication,\" in Proceedings of the Third International Conference on Information and Knowledge Management (CIKM). ACM, 1993, pp. 1-10." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 158, + 301, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 158, + 301, + 239 + ], + "spans": [ + { + "bbox": [ + 47, + 158, + 301, + 239 + ], + "type": "text", + "content": "[882] D. S. Milojicic, M. Breugst, I. Busse, J. Campbell, S. Covaci, B. Friedman, K. Kosaka, D. B. Lange, K. Ono, M. Oshima, C. Tham, S. Virdhagriswaran, and J. White, \"Masif: The omg mobile agent system interoperability facility,\" in Proceedings of the Second International Workshop on Mobile Agents, ser. MA '98. Berlin, Heidelberg: Springer-Verlag, 1998, p. 50-67." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 239, + 301, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 239, + 301, + 274 + ], + "spans": [ + { + "bbox": [ + 47, + 239, + 301, + 274 + ], + "type": "text", + "content": "[883] F. for Intelligent Physical Agents, \"Fipa communicative act library specification,\" https://www.fipa.org/specs/fipa00037/SC00037J.html, 2000." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 274, + 301, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 274, + 301, + 308 + ], + "spans": [ + { + "bbox": [ + 47, + 274, + 301, + 308 + ], + "type": "text", + "content": "[884] F. Curbera, M. Duftler, R. Khalaf, W. Nagy, N. Mukhi, and S. Weerawarana, \"Web services: Why and how,\" IBM Systems Journal, vol. 41, no. 2, pp. 170-177, 2002." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 308, + 301, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 308, + 301, + 354 + ], + "spans": [ + { + "bbox": [ + 47, + 308, + 301, + 354 + ], + "type": "text", + "content": "[885] G. Hohpe and B. Woolf, Enterprise Integration Patterns: Designing, Building, and Deploying Messaging Solutions, ser. Addison-Wesley Signature Series (Fowler). Addison-Wesley Professional, 2006." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 354, + 301, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 354, + 301, + 422 + ], + "spans": [ + { + "bbox": [ + 47, + 354, + 301, + 422 + ], + "type": "text", + "content": "[886] P. Lewis, E. Perez, A. Piktus, F. Petroni, V. Karpukhin, N. Goyal, H. Kuttler, M. Lewis, W.-t. Yih, T. Rocktäschel et al., \"Retrieval-augmented generation for knowledge-intensive nlp tasks,\" Advances in neural information processing systems, vol. 33, pp. 9459-9474, 2020." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 423, + 301, + 458 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 423, + 301, + 458 + ], + "spans": [ + { + "bbox": [ + 47, + 423, + 301, + 458 + ], + "type": "text", + "content": "[887] G. Izacard and E. Grave, \"Towards an efficient pipeline for knowledge-intensive nlp tasks,\" arXiv preprint arXiv:2112.04426, 2021." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 459, + 301, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 459, + 301, + 493 + ], + "spans": [ + { + "bbox": [ + 47, + 459, + 301, + 493 + ], + "type": "text", + "content": "[888] H. Chase, \"Langchain: Build applications with llms through composability,\" https://github.com/ langchain-ai/langchain, 2022, accessed: Apr. 2025." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 493, + 301, + 527 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 493, + 301, + 527 + ], + "spans": [ + { + "bbox": [ + 47, + 493, + 301, + 527 + ], + "type": "text", + "content": "[889] J. Wu et al., \"Llamaindex: Connecting llms to your knowledge,\" https://github.com/jerryjliu/llama_index, 2023, accessed: Apr. 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 527, + 301, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 527, + 301, + 562 + ], + "spans": [ + { + "bbox": [ + 47, + 527, + 301, + 562 + ], + "type": "text", + "content": "[890] OpenAI, \"Function calling in openerai models,\" https://platform.openai.com/docs/guides/functions, 2023, accessed: Apr. 2025." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 563, + 301, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 563, + 301, + 597 + ], + "spans": [ + { + "bbox": [ + 47, + 563, + 301, + 597 + ], + "type": "text", + "content": "[891] Anthropic, \"Model context protocol,\" 2024, accessed: 2025-04-19. [Online]. Available: https://www.anthropic.com/news/model-context-protocol" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 597, + 301, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 597, + 301, + 631 + ], + "spans": [ + { + "bbox": [ + 47, + 597, + 301, + 631 + ], + "type": "text", + "content": "[892] Google, \"A2a: Agent2agent protocol,\" 2025, accessed: 2025-04-21. [Online]. Available: https://github.com/google/A2A" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 632, + 301, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 632, + 301, + 666 + ], + "spans": [ + { + "bbox": [ + 47, + 632, + 301, + 666 + ], + "type": "text", + "content": "[893] G. Chang, \"Anp: Agent network protocol,\" 2024, accessed: 2025-04-21. [Online]. 
Available: https://www(agent-network-protocol.com/" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 666, + 301, + 699 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 666, + 301, + 699 + ], + "spans": [ + { + "bbox": [ + 47, + 666, + 301, + 699 + ], + "type": "text", + "content": "[894] WildCardAI, \"agents.json specification,\" https://github.com/wild-card-ai/agents.json, 2025, accessed: 2025-04-22." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 700, + 301, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 700, + 301, + 734 + ], + "spans": [ + { + "bbox": [ + 47, + 700, + 301, + 734 + ], + "type": "text", + "content": "[895] NEAR, \"Aitp: Agent interaction & transaction protocol,\" 2025, accessed: 2025-04-22. [Online]. Available: https://aitp.dev/" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 735, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 735, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 47, + 735, + 301, + 747 + ], + "type": "text", + "content": "[896] L. F. Al and L. Data, \"Acp: Agent communication pro" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 747 + ], + "type": "list", + "angle": 0, + "index": 38, + "blocks": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "type": "text", + "content": "tocol,\" 2025, accessed: 2025-04-22. [Online]. Available: https://github.com/orgs/i-am-bee/discussions/284" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 66, + 564, + 100 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 66, + 564, + 100 + ], + "spans": [ + { + "bbox": [ + 310, + 66, + 564, + 100 + ], + "type": "text", + "content": "[897] G. 
Cisco, Langchain, \"Acp: Agent connect protocol,\" 2025, accessed: 2025-04-22. [Online]. Available: https://spec.acp.agntcy.org/" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 101, + 564, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 101, + 564, + 157 + ], + "spans": [ + { + "bbox": [ + 310, + 101, + 564, + 157 + ], + "type": "text", + "content": "[898] S. Marro, E. L. Malfa, J. Wright, G. Li, N. Shadbolt, M. Wooldridge, and P. Torr, \"A scalable communication protocol for networks of large language models,\" 2024. [Online]. Available: https://arxiv.org/abs/2410.11905" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 158, + 564, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 158, + 564, + 191 + ], + "spans": [ + { + "bbox": [ + 310, + 158, + 564, + 191 + ], + "type": "text", + "content": "[899] Eclipse, \"Language model operating system (lmos),\" https://eclipse.dev/lmos/, 2025, accessed: 2025-04-22." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 192, + 564, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 192, + 564, + 216 + ], + "spans": [ + { + "bbox": [ + 310, + 192, + 564, + 216 + ], + "type": "text", + "content": "[900] AlEngineerFoundation, \"Agent protocol,\" https://agentprotocol.ai/, 2025, accessed: 2025-04-22." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 216, + 564, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 216, + 564, + 262 + ], + "spans": [ + { + "bbox": [ + 310, + 216, + 564, + 262 + ], + "type": "text", + "content": "[901] R. Ranjan, S. Gupta, and S. N. Singh, \"Loka protocol: A decentralized framework for trustworthy and ethical ai agent ecosystems,\" 2025. [Online]. 
Available: https://arxiv.org/abs/2504.10915" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 262, + 564, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 262, + 564, + 308 + ], + "spans": [ + { + "bbox": [ + 310, + 262, + 564, + 308 + ], + "type": "text", + "content": "[902] A. Srinivasan, K. Bania, S. V, H. Mestha, and S. Liu, \"Implementation and application of an intelligibility protocol for interaction with an llm,\" 2024. [Online]. Available: https://arxiv.org/abs/2410.20600" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 308, + 564, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 308, + 564, + 342 + ], + "spans": [ + { + "bbox": [ + 310, + 308, + 564, + 342 + ], + "type": "text", + "content": "[903] I. Bae, J. Lee, and H.-G. Jeon, \"Continuous locomotive crowd behavior generation,\" 2025. [Online]. Available: https://arxiv.org/abs/2504.04756" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 343, + 564, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 343, + 564, + 400 + ], + "spans": [ + { + "bbox": [ + 310, + 343, + 564, + 400 + ], + "type": "text", + "content": "[904] L. Gąsieniec, Łukasz Kuszner, E. Latif, R. Parasuraman, P. Spirakis, and G. Stachowiak, \"Anonymous distributed localisation via spatial population protocols,\" 2024. [Online]. Available: https://arxiv.org/abs/2411.08434" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 401, + 564, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 401, + 564, + 456 + ], + "spans": [ + { + "bbox": [ + 310, + 401, + 564, + 456 + ], + "type": "text", + "content": "[905] J. Tu, T. Wang, J. Wang, S. Manivasagam, M. Ren, and R. Urtasun, \"Adversarial attacks on multi-agent communication,\" in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2021, pp. 7768-7777." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 458, + 564, + 515 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 458, + 564, + 515 + ], + "spans": [ + { + "bbox": [ + 310, + 458, + 564, + 515 + ], + "type": "text", + "content": "[906] L. Yuan, F. Chen, Z. Zhang, and Y. Yu, \"Communication-robust multi-agent learning by adaptable auxiliary multi-agent adversary generation,\" Frontiers of Computer Science, vol. 18, no. 6, p. 186331, 2024." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 516, + 564, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 516, + 564, + 562 + ], + "spans": [ + { + "bbox": [ + 310, + 516, + 564, + 562 + ], + "type": "text", + "content": "[907] J. Blumenkamp and A. Prorok, \"The emergence of adversarial communication in multi-agent reinforcement learning,\" in Conference on Robot Learning. PMLR, 2021, pp. 1394-1414." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 563, + 564, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 563, + 564, + 608 + ], + "spans": [ + { + "bbox": [ + 310, + 563, + 564, + 608 + ], + "type": "text", + "content": "[908] Z. Chen, Z. Xiang, C. Xiao, D. Song, and B. Li, \"Agent-poison: Red-teaming llm agents via poisoning memory or knowledge bases,\" in The Thirty-eighth Annual Conference on Neural Information Processing Systems." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 609, + 564, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 609, + 564, + 643 + ], + "spans": [ + { + "bbox": [ + 310, + 609, + 564, + 643 + ], + "type": "text", + "content": "[909] X. Pan, J. Dai, Y. Fan, and M. Yang, \"Frontier ai systems have surpassed the self-replicating red line,\" arXiv preprint arXiv:2412.12140, 2024." 
+ } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 310, + 643, + 564, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 643, + 564, + 700 + ], + "spans": [ + { + "bbox": [ + 310, + 643, + 564, + 700 + ], + "type": "text", + "content": "[910] L. Yu, Y. Qiu, Q. Yao, Y. Shen, X. Zhang, and J. Wang, \"Robust communicative multi-agent reinforcement learning with active defense,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 38, no. 16, 2024, pp. 17575-17582." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 310, + 701, + 564, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 701, + 564, + 734 + ], + "spans": [ + { + "bbox": [ + 310, + 701, + 564, + 734 + ], + "type": "text", + "content": "[911] J. Light, M. Cai, S. Shen, and Z. Hu, \"Avalonbench: Evaluating llms playing the game of avalon,\" arXiv preprint arXiv:2310.05036, 2023." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 310, + 735, + 564, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 735, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 310, + 735, + 564, + 747 + ], + "type": "text", + "content": "[912] Q. Xie, Q. Feng, T. Zhang, Q. Li, L. Yang, Y. Zhang," + } + ] + } + ], + "index": 37 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 
8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "67" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 66 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 42, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 71, + 42, + 299, + 77 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 42, + 299, + 77 + ], + "spans": [ + { + "bbox": [ + 71, + 42, + 299, + 77 + ], + "type": "text", + "content": "R. Feng, L. He, S. Gao, and Y. Zhang, \"Human simulacra: Benchmarking the personification of large language models,\" arXiv preprint arXiv:2402.18180, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 77, + 301, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 77, + 301, + 112 + ], + "spans": [ + { + "bbox": [ + 46, + 77, + 301, + 112 + ], + "type": "text", + "content": "[913] L. Geng and E. Y. Chang, \"Realm-bench: A real-world planning benchmark for llms and multi-agent systems,\" arXiv preprint arXiv:2502.18836, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 112, + 301, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 112, + 301, + 157 + ], + "spans": [ + { + "bbox": [ + 47, + 112, + 301, + 157 + ], + "type": "text", + "content": "[914] Y. Dubois, B. Galambosi, P. Liang, and T. B. Hashimoto, \"Length-controlled alpacaeval: A simple way to debias automatic evaluators,\" arXiv preprint arXiv:2404.04475, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 158, + 301, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 158, + 301, + 215 + ], + "spans": [ + { + "bbox": [ + 47, + 158, + 301, + 215 + ], + "type": "text", + "content": "[915] W. Wang, J. Shi, C. Wang, C. Lee, Y. Yuan, J.-T. Huang, and M. R. Lyu, \"Learning to ask: When llms meet unclear instruction,\" ArXiv, vol. abs/2409.00557, 2024. [Online]. Available: https://api-semanticscholar.org/CorpusID:272368496" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 216, + 301, + 273 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 216, + 301, + 273 + ], + "spans": [ + { + "bbox": [ + 47, + 216, + 301, + 273 + ], + "type": "text", + "content": "[916] C. Guo, X. Liu, C. Xie, A. Zhou, Y. Zeng, Z. Lin, D. Song, and B. Li, \"Redcode: Risky code execution and generation benchmark for code agents,\" Advances in Neural Information Processing Systems, vol. 37, pp. 106-190-106-236, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 274, + 301, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 274, + 301, + 330 + ], + "spans": [ + { + "bbox": [ + 47, + 274, + 301, + 330 + ], + "type": "text", + "content": "[917] X. Yuan, J. Li, D. Wang, Y. Chen, X. Mao, L. Huang, H. Xue, W. Wang, K. Ren, and J. Wang, \"S-eval: Automatic and adaptive test generation for benchmarking safety evaluation of large language models,\" arXiv preprint arXiv:2405.14191, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 331, + 301, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 331, + 301, + 376 + ], + "spans": [ + { + "bbox": [ + 47, + 331, + 301, + 376 + ], + "type": "text", + "content": "[918] D. Dorn, A. Variengien, C.-R. Segerie, and V. 
Corruble, \"Bells: A framework towards future proof benchmarks for the evaluation of llm safeguards,\" arXiv preprint arXiv:2406.01364, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 377, + 301, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 377, + 301, + 422 + ], + "spans": [ + { + "bbox": [ + 47, + 377, + 301, + 422 + ], + "type": "text", + "content": "[919] Y. Shao, T. Li, W. Shi, Y. Liu, and D. Yang, \"Privacylens: Evaluating privacy norm awareness of language models in action,\" arXiv preprint arXiv:2409.00138, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 423, + 301, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 423, + 301, + 468 + ], + "spans": [ + { + "bbox": [ + 47, + 423, + 301, + 468 + ], + "type": "text", + "content": "[920] Q. Zhan, Z. Liang, Z. Ying, and D. Kang, \"Injecagent: Benchmarking indirect prompt injections in tool-integrated large language model agents,\" arXiv preprint arXiv:2403.02691, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 469, + 301, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 469, + 301, + 525 + ], + "spans": [ + { + "bbox": [ + 47, + 469, + 301, + 525 + ], + "type": "text", + "content": "[921] Z. Zhu, B. Wu, Z. Zhang, and B. Wu, \"Riskawarebench: Towards evaluating physical risk awareness for high-level planning of llm-based embodied agents,\" arXiv e-prints, pp. arXiv-2408, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 526, + 301, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 526, + 301, + 562 + ], + "spans": [ + { + "bbox": [ + 47, + 526, + 301, + 562 + ], + "type": "text", + "content": "[922] Z. Zhang, S. Cui, Y. Lu, J. Zhou, J. Yang, H. Wang, and M. Huang, \"Agent-safetybench: Evaluating the safety of llm agents,\" arXiv preprint arXiv:2412.14470, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 562, + 301, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 562, + 301, + 619 + ], + "spans": [ + { + "bbox": [ + 47, + 562, + 301, + 619 + ], + "type": "text", + "content": "[923] M. Andriushchenko, A. Souly, M. Dziemian, D. Duenas, M. Lin, J. Wang, D. Hendrycks, A. Zou, Z. Kolter, M. Fredrikson et al., \"Agentharm: A benchmark for measuring harmfulness of llm agents,\" arXiv preprint arXiv:2410.09024, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 620, + 301, + 666 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 620, + 301, + 666 + ], + "spans": [ + { + "bbox": [ + 47, + 620, + 301, + 666 + ], + "type": "text", + "content": "[924] J. Ye, S. Li, G. Li, C. Huang, S. Gao, Y. Wu, Q. Zhang, T. Gui, and X. Huang, \"Toolsword: Unveiling safety issues of large language models in tool learning across three stages,\" arXiv preprint arXiv:2402.10753, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 666, + 301, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 666, + 301, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 666, + 301, + 712 + ], + "type": "text", + "content": "[925] Y. Ruan, H. Dong, A. Wang, S. Pitis, Y. Zhou, J. Ba, Y. Dubois, C. J. Maddison, and T. Hashimoto, \"Identifying the risks of lm agents with an lm-emulated sandbox,\" arXiv preprint arXiv:2309.15817, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "type": "text", + "content": "[926] X. Zhou, H. Kim, F. Brahman, L. Jiang, H. Zhu, X. Lu, F. Xu, B. Y. Lin, Y. Choi, N. 
Mireshghallah et al., \"Haicosystem: An ecosystem for sandboxing" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 747 + ], + "type": "list", + "angle": 0, + "index": 34, + "blocks": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "type": "text", + "content": "safety risks in human-ai interactions,\" arXiv preprint arXiv:2409.16427, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 65, + 564, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 65, + 564, + 112 + ], + "spans": [ + { + "bbox": [ + 310, + 65, + 564, + 112 + ], + "type": "text", + "content": "[927] S. Yin, X. Pang, Y. Ding, M. Chen, Y. Bi, Y. Xiong, W. Huang, Z. Xiang, J. Shao, and S. Chen, \"Safeagent-bench: A benchmark for safe task planning of embodied llm agents,\" arXiv preprint arXiv:2412.13178, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 112, + 564, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 112, + 564, + 146 + ], + "spans": [ + { + "bbox": [ + 310, + 112, + 564, + 146 + ], + "type": "text", + "content": "[928] J. BENCHMARK, \"Jailjudge: A comprehensive jailbreak judge benchmark with multi-agent enhanced explanation evaluation framework.\"" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 146, + 564, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 146, + 564, + 191 + ], + "spans": [ + { + "bbox": [ + 310, + 146, + 564, + 191 + ], + "type": "text", + "content": "[929] P. Y. Zhong, S. Chen, R. Wang, M. McCall, B. L. Titzer, and H. Miller, \"Rtbas: Defending llm agents against prompt injection and privacy leakage,\" arXiv preprint arXiv:2502.08966, 2025." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 192, + 564, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 192, + 564, + 250 + ], + "spans": [ + { + "bbox": [ + 310, + 192, + 564, + 250 + ], + "type": "text", + "content": "[930] A. Liu, Y. Zhou, X. Liu, T. Zhang, S. Liang, J. Wang, Y. Pu, T. Li, J. Zhang, W. Zhou et al., \"Compromising lvm driven embodied agents with contextual backdoor attacks,\" IEEE Transactions on Information Forensics and Security, 2025." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 251, + 564, + 284 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 251, + 564, + 284 + ], + "spans": [ + { + "bbox": [ + 310, + 251, + 564, + 284 + ], + "type": "text", + "content": "[931] —, \"Compromising embodied agents with contextual backdoor attacks,\" arXiv preprint arXiv:2408.02882, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 285, + 564, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 285, + 564, + 330 + ], + "spans": [ + { + "bbox": [ + 310, + 285, + 564, + 330 + ], + "type": "text", + "content": "[932] H. Zhang, C. Zhu, X. Wang, Z. Zhou, S. Hu, and L. Y. Zhang, \"Badrobot: Jailbreaking llm-based embodied ai in the physical world,\" arXiv preprint arXiv:2407.20242, 2024." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 331, + 564, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 331, + 564, + 376 + ], + "spans": [ + { + "bbox": [ + 310, + 331, + 564, + 376 + ], + "type": "text", + "content": "[933] W. Shen, C. Li, H. Chen, M. Yan, X. Quan, H. Chen, J. Zhang, and F. Huang, \"Small llms are weak tool learners: A multi-llm agent,\" arXiv preprint arXiv:2401.07324, 2024." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 376, + 564, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 376, + 564, + 422 + ], + "spans": [ + { + "bbox": [ + 310, + 376, + 564, + 422 + ], + "type": "text", + "content": "[934] S. Yuan, K. Song, J. Chen, X. Tan, Y. Shen, R. Kan, D. Li, and D. Yang, \"Easytool: Enhancing llm-based agents with concise tool instruction,\" arXiv preprint arXiv:2401.06201, 2024." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 423, + 564, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 423, + 564, + 480 + ], + "spans": [ + { + "bbox": [ + 310, + 423, + 564, + 480 + ], + "type": "text", + "content": "[935] S. Wu, S. Zhao, Q. Huang, K. Huang, M. Yasunaga, K. Cao, V. Ioannidis, K. Subbian, J. Leskovec, and J. Y. Zou, \"Avatar: Optimizing llm agents for tool usage via contrastive reasoning,\" Advances in Neural Information Processing Systems, vol. 37, pp. 25981-26010, 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 481, + 564, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 481, + 564, + 504 + ], + "spans": [ + { + "bbox": [ + 310, + 481, + 564, + 504 + ], + "type": "text", + "content": "[936] Z. Shen, \"Llm with tools: A survey,\" arXiv preprint arXiv:2409.18807, 2024." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 504, + 564, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 504, + 564, + 550 + ], + "spans": [ + { + "bbox": [ + 310, + 504, + 564, + 550 + ], + "type": "text", + "content": "[937] C. Qian, W. Liu, H. Liu, N. Chen, Y. Dang, J. Li, C. Yang, W. Chen, Y. Su, X. Cong et al., \"Chatdev: Communicative agents for software development,\" arXiv preprint arXiv:2307.07924, 2023." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 550, + 564, + 607 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 550, + 564, + 607 + ], + "spans": [ + { + "bbox": [ + 310, + 550, + 564, + 607 + ], + "type": "text", + "content": "[938] Z. M. Wang, Z. Peng, H. Que, J. Liu, W. Zhou, Y. Wu, H. Guo, R. Gan, Z. Ni, J. Yang et al., \"Rolellm: Benchmarking, eliciting, and enhancing role-playing abilities of large language models,\" arXiv preprint arXiv:2310.00746, 2023." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 608, + 564, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 608, + 564, + 665 + ], + "spans": [ + { + "bbox": [ + 310, + 608, + 564, + 665 + ], + "type": "text", + "content": "[939] J. Zhou, Z. Chen, D. Wan, B. Wen, Y. Song, J. Yu, Y. Huang, L. Peng, J. Yang, X. Xiao et al., \"Characterglm: Customizing chinese conversational ai characters with large language models,\" arXiv preprint arXiv:2311.16832, 2023." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 666, + 564, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 666, + 564, + 712 + ], + "spans": [ + { + "bbox": [ + 310, + 666, + 564, + 712 + ], + "type": "text", + "content": "[940] Z. Chen, K. Liu, Q. Wang, W. Zhang, J. Liu, D. Lin, K. Chen, and F. Zhao, \"Agent-flan: Designing data and methods of effective agent tuning for large language models,\" arXiv preprint arXiv:2403.12881, 2024." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 712, + 564, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 712, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 310, + 712, + 564, + 747 + ], + "type": "text", + "content": "[941] G. Zhang, L. Niu, J. Fang, K. Wang, L. Bai, and X. Wang, \"Multi-agent architecture search via agentic supernet,\" arXiv preprint arXiv:2502.04180, 2025." 
+ } + ] + } + ], + "index": 33 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "68" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 67 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 47, + 42, + 301, + 77 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 42, + 301, + 77 + ], + "spans": [ + { + "bbox": [ + 47, + 42, + 301, + 77 + ], + "type": "text", + "content": "[942] L. P. Kaelbling, M. L. Littman, and A. W. Moore, \"Reinforcement learning: A survey,\" Journal of artificial intelligence research, vol. 4, pp. 237-285, 1996." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 77, + 301, + 100 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 77, + 301, + 100 + ], + "spans": [ + { + "bbox": [ + 47, + 77, + 301, + 100 + ], + "type": "text", + "content": "[943] Y. Li, \"Deep reinforcement learning: An overview,\" arXiv preprint arXiv:1701.07274, 2017." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 100, + 301, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 100, + 301, + 134 + ], + "spans": [ + { + "bbox": [ + 47, + 100, + 301, + 134 + ], + "type": "text", + "content": "[944] X. Li, Y. Fan, and S. 
Cheng, \"Aigc in china: Current developments and future outlook,\" arXiv preprint arXiv:2308.08451, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 134, + 301, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 134, + 301, + 179 + ], + "spans": [ + { + "bbox": [ + 47, + 134, + 301, + 179 + ], + "type": "text", + "content": "[945] X. Sun, L. Dong, X. Li, Z. Wan, S. Wang, T. Zhang, J. Li, F. Cheng, L. Lyu, F. Wu et al., \"Pushing the limits of chatgpt on nlp tasks,\" arXiv preprint arXiv:2306.09719, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 180, + 301, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 180, + 301, + 239 + ], + "spans": [ + { + "bbox": [ + 47, + 180, + 301, + 239 + ], + "type": "text", + "content": "[946] G. Sriramanan, S. Bharti, V. S. Sadasivan, S. Saha, P. Kattakinda, and S. Feizi, \"Llm-check: Investigating detection of hallucinations in large language models,\" Advances in Neural Information Processing Systems, vol. 37, pp. 34188-34216, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 239, + 301, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 239, + 301, + 297 + ], + "spans": [ + { + "bbox": [ + 47, + 239, + 301, + 297 + ], + "type": "text", + "content": "[947] K. Zheng, J. Chen, Y. Yan, X. Zou, and X. Hu, \"Reefknot: A comprehensive benchmark for relation hallucination evaluation, analysis and mitigation in multimodal large language models,\" arXiv preprint arXiv:2408.09429, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 297, + 301, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 297, + 301, + 354 + ], + "spans": [ + { + "bbox": [ + 47, + 297, + 301, + 354 + ], + "type": "text", + "content": "[948] X. Zou, Y. Wang, Y. Yan, S. Huang, K. Zheng, J. Chen, C. Tang, and X. 
Hu, \"Look twice before you answer: Memory-space visual retracing for hallucination mitigation in multimodal large language models,\" arXiv preprint arXiv:2410.03577, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 354, + 301, + 411 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 354, + 301, + 411 + ], + "spans": [ + { + "bbox": [ + 47, + 354, + 301, + 411 + ], + "type": "text", + "content": "[949] G. Zhou, Y. Yan, X. Zou, K. Wang, A. Liu, and X. Hu, \"Mitigating modality prior-induced hallucinations in multimodal large language models via deciphering attention causality,\" arXiv preprint arXiv:2410.04780, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 412, + 301, + 470 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 412, + 301, + 470 + ], + "spans": [ + { + "bbox": [ + 47, + 412, + 301, + 470 + ], + "type": "text", + "content": "[950] W. Wang, Z. Ma, Z. Wang, C. Wu, W. Chen, X. Li, and Y. Yuan, \"A survey of llm-based agents in medicine: How far are we from baymax?\" ArXiv, vol. abs/2502.11211, 2025. [Online]. Available: https://api.sementicscholar.org/CorpusID:276408182" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 470, + 301, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 470, + 301, + 504 + ], + "spans": [ + { + "bbox": [ + 47, + 470, + 301, + 504 + ], + "type": "text", + "content": "[951] H. Kang and X.-Y. Liu, \"Deficiency of large language models in finance: An empirical examination of hallucination,\" arXiv preprint arXiv:2311.15548, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 504, + 301, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 504, + 301, + 584 + ], + "spans": [ + { + "bbox": [ + 47, + 504, + 301, + 584 + ], + "type": "text", + "content": "[952] L. Ouyang, J. Wu, X. Jiang, D. Almeida, C. L. Wainwright, P. Mishkin, C. Zhang, S. Agarwal, K. 
Slama, A. Ray, J. Schulman, J. Hilton, F. Kelton, L. Miller, M. Simens, A. Askell, P. Welinder, P. F. Christiano, J. Leike, and R. Lowe, \"Training language models to follow instructions with human feedback,\" in NeurIPS, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 585, + 301, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 585, + 301, + 632 + ], + "spans": [ + { + "bbox": [ + 47, + 585, + 301, + 632 + ], + "type": "text", + "content": "[953] Y. Liu, Y. Yao, J.-F. Ton, X. Zhang, R. Guo, H. Cheng, Y. Klochkov, M. F. Taufiq, and H. Li, \"Trustworthy llms: a survey and guideline for evaluating large language models' alignment,\" 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 632, + 301, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 632, + 301, + 677 + ], + "spans": [ + { + "bbox": [ + 47, + 632, + 301, + 677 + ], + "type": "text", + "content": "[954] M. Hao, H. Li, H. Chen, P. Xing, G. Xu, and T. Zhang, \"Iron: Private inference on transformers,\" Advances in neural information processing systems, vol. 35, pp. 15718-15731, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 677, + 301, + 735 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 301, + 735 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 301, + 735 + ], + "type": "text", + "content": "[955] J. Huang, J.-T. Huang, Z. Liu, X. Liu, W. Wang, and J. Zhao, \"Vlms as geoguessr masters: Exceptional performance, hidden biases, and privacy risks,\" ArXiv, vol. abs/2502.11163, 2025. [Online]. Available: https://api.sementicscholar.org/CorpusID:276409319" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 735, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 735, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 47, + 735, + 301, + 747 + ], + "type": "text", + "content": "[956] G. Feretzakis and V. S. 
Verykios, \"Trustworthy ai:" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 42, + 564, + 632 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "spans": [ + { + "bbox": [ + 335, + 42, + 564, + 65 + ], + "type": "text", + "content": "Securing sensitive data in large language models,\" AI, vol. 5, no. 4, pp. 2773-2800, 2024." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 310, + 65, + 564, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 65, + 564, + 112 + ], + "spans": [ + { + "bbox": [ + 310, + 65, + 564, + 112 + ], + "type": "text", + "content": "[957] Q. Feng, S. R. Kasa, H. Yun, C. H. Teo, and S. B. Bodapati, \"Exposing privacy gaps: Membership inference attack on preference data for llm alignment,\" arXiv preprint arXiv:2407.06443, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 310, + 112, + 564, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 112, + 564, + 146 + ], + "spans": [ + { + "bbox": [ + 310, + 112, + 564, + 146 + ], + "type": "text", + "content": "[958] N. Rahman and E. Santacana, “Beyond fair use: Legal risk evaluation for training llms on copyrighted text,” in ICML Workshop on Generative AI and Law, 2023." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 310, + 146, + 564, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 146, + 564, + 204 + ], + "spans": [ + { + "bbox": [ + 310, + 146, + 564, + 204 + ], + "type": "text", + "content": "[959] J. Guo, Y. Li, R. Chen, Y. Wu, C. Liu, Y. Chen, and H. Huang, \"Towards copyright protection for knowledge bases of retrieval-augmented language models via ownership verification with reasoning,\" arXiv preprint arXiv:2502.10440, 2025." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 205, + 564, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 205, + 564, + 251 + ], + "spans": [ + { + "bbox": [ + 310, + 205, + 564, + 251 + ], + "type": "text", + "content": "[960] S. Shao, Y. Li, H. Yao, Y. He, Z. Qin, and K. Ren, \"Explanation as a watermark: Towards harmless and multi-bit model ownership verification via watermarking feature attribution,\" in NDSS, 2025." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 251, + 564, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 251, + 564, + 285 + ], + "spans": [ + { + "bbox": [ + 310, + 251, + 564, + 285 + ], + "type": "text", + "content": "[961] W. Xu, K. Gao, H. He, and M. Zhou, \"Licoeval: Evaluating llms on license compliance in code generation,\" arXiv preprint arXiv:2408.02487, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 285, + 564, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 285, + 564, + 331 + ], + "spans": [ + { + "bbox": [ + 310, + 285, + 564, + 331 + ], + "type": "text", + "content": "[962] W. Qu, W. Zheng, T. Tao, D. Yin, Y. Jiang, Z. Tian, W. Zou, J. Jia, and J. Zhang, \"Provably robust multi-bit watermarking for ai-generated text,\" arXiv preprint arXiv:2401.16820, 2024." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 331, + 564, + 377 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 331, + 564, + 377 + ], + "spans": [ + { + "bbox": [ + 310, + 331, + 564, + 377 + ], + "type": "text", + "content": "[963] J. Kirchenbauer, J. Geiping, Y. Wen, J. Katz, I. Miers, and T. Goldstein, \"A watermark for large language models,\" in International Conference on Machine Learning. PMLR, 2023, pp. 17061-17084." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 377, + 564, + 424 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 377, + 564, + 424 + ], + "spans": [ + { + "bbox": [ + 310, + 377, + 564, + 424 + ], + "type": "text", + "content": "[964] J. Ye, Y. Wang, Y. Huang, D. Chen, Q. Zhang, N. Moniz, T. Gao, W. Geyer, C. Huang, P.-Y. Chen et al., \"Justice or prejudice? quantifying biases in llm-as-a-judge,\" arXiv preprint arXiv:2410.02736, 2024." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 423, + 564, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 423, + 564, + 504 + ], + "spans": [ + { + "bbox": [ + 310, + 423, + 564, + 504 + ], + "type": "text", + "content": "[965] Y. Wan, W. Wang, P. He, J. Gu, H. Bai, and M. R. Lyu, \"Biasaker: Measuring the bias in conversational ai system,\" Proceedings of the 31st ACM Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering, 2023. [Online]. Available: https://api-semanticscholar.org/CorpusID:258833296" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 504, + 564, + 539 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 504, + 564, + 539 + ], + "spans": [ + { + "bbox": [ + 310, + 504, + 564, + 539 + ], + "type": "text", + "content": "[966] European Union, \"Artificial intelligence act,\" 2024, accessed: 2025-03-07. [Online]. Available: https://artificialintelligenceact.eu/" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 539, + 564, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 539, + 564, + 596 + ], + "spans": [ + { + "bbox": [ + 310, + 539, + 564, + 596 + ], + "type": "text", + "content": "[967] Cyberspace Administration of China, \"Interim measures for the management of generative artificial intelligence services,\" 2023, accessed: 2025-03-07. [Online]. 
Available: https://www.cac.gov.cn/2023-07/13/c_1690898327029107.htm" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 596, + 564, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 596, + 564, + 632 + ], + "spans": [ + { + "bbox": [ + 310, + 596, + 564, + 632 + ], + "type": "text", + "content": "[968] The White House, \"Safe, secure, and trustworthy development and use of artificial intelligence,\" 2023, accessed: 2025-03-07." + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 258, + 35 + ], + "type": "text", + "content": "JOURNAL OF LATEX CLASS FILES, VOL. 14, NO. 8, AUGUST 2015" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "69" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 68 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15659/c7f88426-e408-4745-97f1-882178397313_content_list.json b/data/2025/2504_15xxx/2504.15659/c7f88426-e408-4745-97f1-882178397313_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..da63dc5799df23ee06bcfb3745f765c8903f5810 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15659/c7f88426-e408-4745-97f1-882178397313_content_list.json @@ -0,0 +1,1799 @@ +[ + { + "type": "text", + "text": "VeriCoder: Enhancing LLM-Based RTL Code Generation through Functional Correctness Validation", + "text_level": 1, + "bbox": [ + 99, + 70, + 897, + 130 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Anjiang Wei 
$①$ , Huanmi Tan $①$ , Tarun Suresh $①$ , Daniel Mendoza $①$ , Thiago S. F. X. Teixeira $①$ , Ke Wang $①$ , Caroline Trippel $①$ , and Alex Aiken $①$", + "bbox": [ + 210, + 142, + 785, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract—Recent advances in Large Language Models (LLMs) have sparked growing interest in applying them to Electronic Design Automation (EDA) tasks, particularly Register Transfer Level (RTL) code generation. While several RTL datasets have been introduced, most focus on syntactic validity rather than functional validation with tests, leading to training examples that compile but may not implement the intended behavior. We present VERICODER, a model for RTL code generation fine-tuned on a dataset validated for functional correctness. This fine-tuning dataset is constructed using a novel methodology that combines unit test generation with feedback-directed refinement. Given a natural language specification and an initial RTL design, we prompt a teacher model (GPT-4o-mini) to generate unit tests and iteratively revise the RTL design based on its simulation results using the generated tests. If necessary, the teacher model also updates the tests to ensure they comply with the natural language specification. As a result of this process, every example in our dataset is functionally validated, consisting of a natural language description, an RTL implementation, and passing tests. Fine-tuned on this dataset of 125,777 examples, VERICODER achieves state-of-the-art metrics in functional correctness on VerilogEval and RTLLM, with relative gains of up to $71.7\\%$ and $27.4\\%$ , respectively. An ablation study further shows that models trained on our functionally validated dataset outperform those trained on functionally non-validated datasets, underscoring the importance of high-quality datasets in RTL code generation. 
Our code, data, and models are publicly available at https://github.com/Anjiang-Wei/VeriCoder", + "bbox": [ + 73, + 220, + 491, + 575 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Index Terms—RTL, Code Generation, Large Language Model.", + "bbox": [ + 89, + 580, + 491, + 594 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "I. INTRODUCTION", + "text_level": 1, + "bbox": [ + 215, + 630, + 351, + 643 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large Language Models (LLMs) have demonstrated remarkable performance across natural language processing tasks, spurring growing interest in applying their capabilities to a broad range of Electronic Design Automation (EDA) problems [1]–[4]. Recent efforts explore LLMs for code generation [5]–[12], architecture design [13]–[15], verification [16], [17], tool assistance [18], [19], and debugging [1], [20]. In this work, we focus on generating Register Transfer Level (RTL) code from natural language specifications. Automating RTL code generation has the potential to significantly boost hardware design productivity and reduce the manual effort", + "bbox": [ + 73, + 650, + 491, + 816 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Anjiang Wei, Daniel Mendoza, Caroline Trippel, and Alex Aiken are affiliated with Stanford University (e-mail: anjiang@cs.stanford.edu; dmendo@stanford.edu; trippel@stanford.edu; aiken@cs.stanford.edu).", + "bbox": [ + 73, + 829, + 491, + 864 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Huanmi Tan is affiliated with Carnegie Mellon University (e-mail: huanmi.tan@gmail.com).", + "bbox": [ + 73, + 864, + 491, + 886 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tarun Suresh is affiliated with University of Illinois Urbana-Champaign (e-mail: tsuresh3@illinois.edu).", + "bbox": [ + 73, + 886, + 491, + 909 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Thiago S. F. X. 
Teixeira is with Intel Corporation (e-mail: thiago.teixeira@intel.com).", + "bbox": [ + 73, + 909, + 491, + 931 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ke Wang is with Nanjing University (e-mail: kwg@nju.edu.cn).", + "bbox": [ + 86, + 931, + 434, + 944 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "involved in complex design tasks, making it a timely and impactful area of research.", + "bbox": [ + 501, + 219, + 921, + 250 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Developing open-source, lightweight models for RTL code generation is essential for advancing both research and deployment. Proprietary models such as GPT-4o and Claude 3.7 restrict customization and lack transparency, making them unsuitable for in-depth analysis and academic exploration. They also raise privacy and security concerns, especially when handling RTL designs that may contain sensitive intellectual property. In contrast, lightweight models that can run locally offer a secure, privacy-preserving alternative—enabling hardware engineers to integrate AI directly into their design workflows. However, existing open-source models still underperform on RTL tasks, largely due to the absence of high-quality, functionally validated RTL datasets in their training corpora [21], [22]. While training algorithms are readily available, progress is bottlenecked by the lack of open datasets with functional correctness validation.", + "bbox": [ + 501, + 250, + 923, + 474 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "A key challenge in building such datasets lies in constructing large-scale, high-quality training data that pairs natural language specifications with RTL implementations. Despite efforts to mine RTL code from open-source repositories [23]–[26], much of the collected data lacks validation and may not align with its intended functionality. 
To address this, recent work has turned to LLMs—either prompting them to synthesize RTL designs from keyword-based specifications [6], [7] or leveraging them to rewrite existing RTL code and generate matching specifications [8], [24], [26]. In both cases, syntax checkers are often employed to filter uncompilable code or provide feedback for iterative refinement, but these techniques still fall short of validating functional correctness.", + "bbox": [ + 501, + 477, + 923, + 672 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "As far as we know, all these prior work [6]–[8], [24], [26] have focused solely on ensuring syntactic correctness, overlooking functional correctness. As a result, many dataset examples compile successfully but may not implement the behavior described in their natural language specifications. The distinction between syntactic correctness and functional correctness has important implications for model evaluation and real-world deployment. While functionally correct code inherently satisfies syntax constraints, syntactic correctness alone does not guarantee correct functionality. This gap is evident in the results reported by the RTLLM benchmark [10], where GPT-4o attains a high syntax accuracy of $100.0\\%$ , yet achieve only $69.0\\%$ in terms of functional correctness. 
Ultimately, in real-world settings, it is functional correctness rather than syntactic validity that truly matters.", + "bbox": [ + 501, + 672, + 923, + 898 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this work, we introduce VeriCoder, a model for RTL code generation fine-tuned on a high-quality dataset consisting of 125,777 examples that has been validated for functional", + "bbox": [ + 501, + 898, + 921, + 944 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.15659v2 [cs.AR] 24 Aug 2025", + "bbox": [ + 22, + 273, + 58, + 724 + ], + "page_idx": 0 + }, + { + "type": "table", + "img_path": "images/9e6ce1035e375309829d6f9583b1aeaed11f21f05777f87cc4761e227314122f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Prior WorkStrategyDescriptionSyntax CheckerUnit Tests
RTLCoder [7]Keyword-based Generation, MutationPrompt LLM with keywords and existing code, followed by iterative mutation to get instruction-code pairs.X
OriGen [8]Code-to-Code, Syntax Error CorrectionApplies LLM-driven code-to-code pipeline on existing RTL code and filters them by compiler error feedback.X
BetterV [24]Web Scraping & Cleaning, Alignment with CLarge-scale web-collected Verilog, cleaned and filtered to enforce coding standards; aligns C with Verilog.X
VeriGen [26]Manually Collect Textbook and Open-Source CodeMines real-world RTL from GitHub and textbooks, manually cleans and organizes them into a structured dataset.X
ChipGPT [27]AST-based SynthesisConverts Verilog ASTs into natural-language prompts and injects semantic error variants via EDA-tool feedback.X
VeriCoder (Our Work)Feedback-Directed Refinement, Simulation, Unit Test GenerationIteratively generate unit tests with a teacher LLM, check implementations via compiler and simulator, and refining designs and tests until each design passes.
", + "bbox": [ + 76, + 63, + 923, + 305 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "TABLE I: Comparison of Verilog fine-tuning dataset construction approaches.", + "bbox": [ + 233, + 310, + 759, + 325 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "correctness1. To construct this dataset, we develop a novel pipeline that combines unit test generation with feedback-directed refinement guided by a teacher LLM (GPT-4o-mini). Given a natural language specification and an initial RTL implementation, the teacher model first generates a unit test. If the RTL code fails the simulation, the model iteratively revises the design based on the observed error messages. When needed, the unit test is also updated to better reflect the intended functionality described by the specification. This process continues until the design passes simulation or a retry limit is reached. The resulting fine-tuning dataset consists of 125,777 validated triples: a natural language specification, a correct RTL design, and a self-checking unit test.", + "bbox": [ + 73, + 352, + 491, + 547 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We fine-tune VeriCoder from Qwen2.5-14B-Instruct using our curated dataset and evaluate it on two established RTL code generation benchmarks: VerilogEval [9] and RTLLM [10]. VeriCoder achieves new state-of-the-art performance, achieving up to $71.7\\%$ and $27.4\\%$ relative gains in the pass@k metric over the previous best fine-tuned model OriGen [8].", + "bbox": [ + 73, + 549, + 491, + 638 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We conduct an ablation study demonstrating that models trained on our functionally validated dataset outperform those trained on non-validated data, under the same base model and training setup. 
These results highlight the importance of high-quality, functionally validated datasets for RTL code generation.", + "bbox": [ + 73, + 638, + 493, + 714 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our contributions are as follows:", + "bbox": [ + 89, + 715, + 316, + 729 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We introduce VeriCoder, an RTL code generation model fine-tuned on a dataset validated for functional correctness. On the VerilogEval and RTLLM benchmarks, VeriCoder achieves state-of-the-art performance among open-source fine-tuned models, yielding relative pass@k gains of up to $71.7\\%$ and $27.4\\%$ over the prior best.", + "- We develop a dataset augmentation pipeline that combines unit test generation with feedback-directed refinement guided by a teacher LLM. This yields, to the best of our knowledge, the largest fine-tuning dataset to date with functional validation, consisting of 125,777 validated" + ], + "bbox": [ + 91, + 731, + 491, + 897 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1While functional correctness is not fully guaranteed, we manually reviewed 100 randomly sampled examples and found that $92\\%$ of the generated RTL code correctly matches the corresponding natural language descriptions.", + "bbox": [ + 73, + 907, + 491, + 945 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "triples of natural language specifications, RTL designs, and passing tests.", + "bbox": [ + 535, + 353, + 921, + 383 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "- We conduct an ablation study showing that functional validation during dataset construction improves model performance, underscoring the importance of using high-quality functionally validated datasets for RTL code generation.", + "bbox": [ + 522, + 383, + 921, + 458 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "II. 
BACKGROUND AND RELATED WORK", + "text_level": 1, + "bbox": [ + 568, + 479, + 854, + 493 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A. Language Modeling and Fine-Tuning", + "text_level": 1, + "bbox": [ + 503, + 500, + 782, + 515 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Large Language Models (LLMs) are deep neural networks trained to perform language modeling, a task where the model learns to predict the next token in a sequence. Formally, given a sequence of tokens $x = (x_{1}, x_{2}, \\ldots, x_{T})$ , the training objective is to maximize the log-likelihood:", + "bbox": [ + 501, + 518, + 921, + 594 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {L M}} = \\sum_ {t = 1} ^ {T} \\log P \\left(x _ {t} \\mid x _ {< t}; \\theta\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 609, + 604, + 921, + 646 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where $\\theta$ denotes the model parameters and $x_{< t} = (x_1, \\ldots, x_{t-1})$ represents the context tokens. This autoregressive objective enables the model to generate coherent text and capture long-range dependencies across various domains.", + "bbox": [ + 503, + 652, + 921, + 712 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The training of LLMs is typically organized into two stages:", + "bbox": [ + 519, + 712, + 921, + 728 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Pre-training: The model is trained on massive, diverse corpora (e.g., web data, books, source code) to acquire broad knowledge and language understanding. This stage is expensive and performed once per model.", + "- Post-training: The pre-trained model is adapted to specific tasks using smaller, curated datasets. This stage includes supervised fine-tuning (SFT), where the model is trained on task-specific input-output pairs." 
+ ], + "bbox": [ + 521, + 729, + 919, + 851 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Since post-training large models from scratch is resource-intensive, researchers have developed parameter-efficient fin-tuning methods. One widely used approach is Low-Rank Adaptation (LoRA) [28]. Instead of updating the full weight matrices $W \\in \\mathbb{R}^{d \\times k}$ in each linear layer, LoRA freezes the original weights and introduces a trainable low-rank update:", + "bbox": [ + 501, + 854, + 921, + 945 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 911, + 31, + 919, + 40 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/5c1e7c992b49adf8de5ea90499d94e6e076d00b68a9c604449489cfdfdbc9a13.jpg", + "image_caption": [ + "Fig. 1: LLM-guided dataset augmentation overview." + ], + "image_footnote": [], + "bbox": [ + 76, + 66, + 923, + 261 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/36bafefd15fa00a56e86e549967356d7fadc2ba905d753cdbfae75fdbec5b723.jpg", + "image_caption": [ + "(a) Natural language specification taken from the Origen [8] dataset." + ], + "image_footnote": [], + "bbox": [ + 76, + 300, + 374, + 467 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/ca37e8cb215359fe9ab488e17022939fb18c1f5bc9ddee7a7e9156793fa8f592.jpg", + "image_caption": [ + "(b) Buggy design taken from the Origen [8] dataset. It times out on the generated test shown in Figure 3.", + "Fig. 2: Natural language specification (left) and the corresponding buggy and corrected Verilog designs (middle and right). The specification and buggy design are from the original dataset [8], which lacks tests, while the test (Figure 3) and corrected design are generated by a teacher model (GPT-4o-mini) and included in our validated dataset." 
+ ], + "image_footnote": [], + "bbox": [ + 405, + 299, + 640, + 470 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/4a5ec9cc51cb0015f2ab1c778411473f2c44ef14e45f1231bc3002fe848b6dfa.jpg", + "image_caption": [ + "(c) Correct design fixed by the teacher model that passes the generated test in Figure 3." + ], + "image_footnote": [], + "bbox": [ + 676, + 299, + 915, + 470 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nW ^ {\\prime} = W + \\Delta W = W + A B, \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 617, + 491, + 633 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $A \\in \\mathbb{R}^{d \\times r}$ and $B \\in \\mathbb{R}^{r \\times k}$ , and $r \\ll \\min(d, k)$ . Only $A$ and $B$ are updated during training, while $W$ remains unchanged. This technique reduces both memory and compute overhead during adaptation, making it feasible to specialize large LLMs to domain-specific applications, such as RTL generation, with limited computational resources.", + "bbox": [ + 73, + 642, + 491, + 733 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "B. Related Work on RTL Code Generation", + "text_level": 1, + "bbox": [ + 73, + 757, + 367, + 771 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Progress on open-source RTL code generation is limited by the absence of large-scale, high-quality datasets. To mitigate this, recent efforts have focused on automated data mining and augmentation techniques to enrich existing corpora of RTL examples. Table I presents the comparison of different strategies for constructing fine-tuning datasets.", + "bbox": [ + 73, + 777, + 490, + 868 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Mining open-source RTL designs is a common strategy for dataset construction. VeriGen [26] compiles Verilog modules from GitHub and textbooks into a structured corpus using automated syntax checks. 
BetterV [24] collects Verilog modules from the internet and then filters designs based on coding style", + "bbox": [ + 73, + 869, + 491, + 945 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "and syntactic validity. CraftRTL [29] augments fine-tuning data with non-textual code representations, injecting synthetic errors derived from intermediate model checkpoints into open-source Verilog code. Other works [8], [30], [31] adopt similar methodologies for sourcing and preprocessing RTL code.", + "bbox": [ + 501, + 599, + 921, + 676 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Another line of work leverages a commercial LLM for synthetic data generation. RTLCoder [6] prompts GPT-3.5 with domain keywords to generate both task descriptions and corresponding RTL, discarding any outputs that fail to compile. OriGen [8] further employs Claude 3.5 in a two-stage code-to-code pipeline: first turning mined RTL code into natural language specifications, then regenerating code from these specifications under compiler guidance, combining the strengths of real-world examples and synthetic generation. ChipGPT [27] transforms Verilog ASTs into natural language specifications.", + "bbox": [ + 501, + 680, + 921, + 830 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "While most of the existing work listed in Table I ensures syntax validity, none of them has any evidence of functional correctness. 
Without comprehensive unit tests or simulation-based feedback during dataset construction, models fine-tuned on these corpora may produce code that compiles but still fails to meet the intended natural language specification.", + "bbox": [ + 501, + 834, + 921, + 926 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A recent work, OpenLLM-RTL [32], explores the idea of", + "bbox": [ + 519, + 929, + 921, + 945 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "`timescale 1ns/1ps\nmodule tb_and3;\nreg a = 0, b = 0, c = 0;\nwire y;\n// Instantiate the DUT (Design Under Test)\nand3 uut (.a(a), .b(b), .c(c), .y(y));\ninitial begin\n// Wait for signals to settle\n#1;\n// Set all inputs to 1; expected y = 1\n{a, b, c} = 3'b111;\n#1;\n// Check output, report error if incorrect\nif (y != 1'B1)\n$fatal(1, \"FAIL: y=%b (expected 1)\", y);\n$display(\"PASS\");\n$finish;\nend\nendmodule", + "guess_lang": "verilog", + "bbox": [ + 89, + 69, + 480, + 351 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Fig. 3: Unit test for the and3 module. The buggy design (Figure 2b) times out on this test, while the corrected design (Figure 2c) passes successfully. The test is generated by the teacher model GPT-4o-mini using the prompt in Figure 4a, and is used to validate and augment the original dataset, which contains no tests.", + "bbox": [ + 73, + 364, + 491, + 455 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "using LLMs to generate assertions, producing a functionally verified dataset of 7k examples. While sharing the same goal of improving functional correctness in fine-tuning datasets, our work takes a different approach by generating unit tests for validation. 
Our final dataset contains over 125,777 examples—the largest functionally validated RTL dataset to date.", + "bbox": [ + 73, + 482, + 491, + 587 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Beyond data collection and synthesis techniques, several works explore other methods to enhance RTL code generation quality. ScaleRTL [33] emphasizes reasoning by generating intermediate traces and leveraging test-time compute through iterative self-reflection. DeepRTL [34] adopts curriculum learning guided by multi-level natural language summaries. VeriSeek [35] applies reinforcement learning with feedback derived from AST-level similarity between LLM outputs and reference designs. AutoVCoder [36] incorporates retrieval-augmented generation (RAG), dynamically supplying relevant Verilog snippets to the model. CodeV [37] extends generation capabilities to tasks such as fill-in-the-middle (FIM). Our work adopts standard supervised fine-tuning while focusing on constructing a large-scale, functionally validated dataset. Our approach is complementary and orthogonal to existing techniques.", + "bbox": [ + 73, + 588, + 491, + 830 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "III. METHODOLOGY", + "text_level": 1, + "bbox": [ + 207, + 845, + 357, + 859 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "A. Overview", + "text_level": 1, + "bbox": [ + 73, + 864, + 165, + 878 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We aim to improve the quality of fine-tuning datasets consisting of natural language specifications paired with syntactically correct Verilog designs, as seen in prior work [6]–[8], [24], [26]. These datasets, including Origen [8], contain", + "bbox": [ + 73, + 883, + 491, + 946 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Verilog designs that pass syntax checks but are not validated against unit tests to ensure functional correctness. 
To address this limitation, we introduce an automated dataset augmentation pipeline that leverages a teacher language model, e.g., GPT-40-mini, to validate each example through iterative refinement. As illustrated in Figure 1, given a natural language specification and an initial RTL design, the teacher model first generates a unit test. If the RTL design fails the simulation, the model iteratively revises the design based on the error message. When needed, it also updates the unit test to better align with the natural language specification. Although our experiments focus on augmenting the Origen dataset due to its size and quality, the proposed methodology is broadly applicable to any dataset lacking test validation.", + "bbox": [ + 501, + 68, + 921, + 280 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The pipeline begins with the original dataset $D = \\{(\\text{specification}, \\text{design})\\}$ , where each RTL design is intended to implement a corresponding natural language specification. However, because no tests are provided, there is no evidence that the designs exhibit the intended functional behavior. For each pair, we prompt the teacher model, GPT-4o-mini, to generate a unit test for the design. The test is compiled and simulated with the design to check for correctness, where correctness means the design passes the simulation test.", + "bbox": [ + 503, + 280, + 921, + 415 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "If the simulation fails, we extract the resulting error message and re-invoke the teacher model using a refinement prompt. This prompt includes the specification, the current design and test, and the error message. The model attempts to resolve the failure by making minimal modifications to the design, the test, or both. 
This refinement process repeats iteratively: each candidate is re-simulated, and the cycle continues until the design passes the test or a maximum number of attempts is reached.", + "bbox": [ + 501, + 415, + 921, + 549 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The final output is a validated dataset $D' = \\{(\\text{specification}, \\text{design}, \\text{test})\\}$ , where each triplet contains a natural language specification, a Verilog design, and unit tests. A concrete motivating example is shown in Section III-B, and the details of the algorithm and prompts are provided in Section III-C.", + "bbox": [ + 503, + 550, + 923, + 641 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "B. Motivating Example", + "text_level": 1, + "bbox": [ + 504, + 655, + 666, + 670 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Figure 2 presents a motivating example taken directly from the Origen dataset [8], highlighting a key limitation of datasets that rely only on syntax checks for validation. Prior work in RTL generation typically assumes that syntactic correctness is sufficient for fine-tuning, without verifying functionality through unit tests. This example demonstrates that a design can compile without errors yet fail to implement the intended behavior. It also illustrates how our method can automatically detect and correct such issues through test generation and iterative refinement.", + "bbox": [ + 501, + 672, + 921, + 823 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This example includes a natural language specification (Figure 2a), a buggy RTL design from the original dataset (Figure 2b), and a corrected design produced by our pipeline (Figure 2c). 
The specification describes a simple combinational module, and3, which computes the bitwise AND of three one-bit inputs: a, b, and c.", + "bbox": [ + 501, + 824, + 921, + 912 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The original design, though syntactically valid, is functionally incorrect due to several semantic issues. First, it misuses non-", + "bbox": [ + 503, + 914, + 921, + 944 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 911, + 31, + 919, + 39 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Prompt Template", + "text_level": 1, + "bbox": [ + 91, + 71, + 218, + 87 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "System Prompt You are a Verilog design and testing expert. Given a hardware specification described in natural language, your job is to generate both a correct Verilog module and a corresponding unit test that checks its functionality through simulation.", + "bbox": [ + 89, + 93, + 470, + 141 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "User Prompt", + "text_level": 1, + "bbox": [ + 91, + 147, + 169, + 157 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Natural Language Specification: {NL Spec}", + "- Initial Implementation: {design}", + "- Your task:" + ], + "bbox": [ + 91, + 159, + 344, + 191 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) Provide the unit tests for the given design.", + "2) Revise the Verilog implementation if the original design fails to pass your test cases.", + "3) Follow good coding practices, such as using meaningful comments to document key logic and decision points.", + "4) Use $fatal(1, \"msg\") to flag incorrect behavior.", + "5) Output format: {\"design\": \"...\", \"test\": \"...\"}" + ], + "bbox": [ + 102, + 193, + 468, + 273 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "(a) Prompt for generating a Verilog module's corresponding 
test", + "bbox": [ + 83, + 292, + 475, + 306 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Prompt Template", + "text_level": 1, + "bbox": [ + 527, + 71, + 656, + 85 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "System Prompt You are a Verilog design and testing expert. Analyze a failing design and its test, and make minimal yet sufficient edits to correct the issue while preserving the intended behavior specified in natural language.", + "bbox": [ + 526, + 93, + 906, + 141 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "User Prompt", + "text_level": 1, + "bbox": [ + 527, + 147, + 604, + 157 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Natural Language Specification: {NL Spec}", + "- Previous Design and Test: {design}, {test}", + "- Simulation Output: {error message}", + "- Your task:" + ], + "bbox": [ + 529, + 159, + 782, + 203 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) Carefully identify the root cause of the failure by analyzing the code and the error message.", + "2) Make changes to either the design or the test (or both) to resolve the issue while maintaining correctness.", + "3) Output format: {\"explanation\": \"...\", \"design\": \"...\", \"test\": \"...\"}" + ], + "bbox": [ + 539, + 204, + 906, + 272 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "(b) Prompt for refining a failing Verilog design and test", + "bbox": [ + 545, + 292, + 887, + 306 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Fig. 
4: Prompt templates provided to the teacher model for automated Verilog test generation and refinement, ensuring that the final design passes the generated test and matches the original natural language specification.", + "bbox": [ + 73, + 314, + 919, + 345 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "blocking assignments $(<=)$ inside a combinational always @\\* block, which can lead to counterintuitive synthesis results. Second, if instead used inside a sequential block, the sequence of non-blocking assignments in the design-y <= a, then y <= y & c, and finally y <= y & b—does not correctly compute and store in y the bitwise AND of a, b, and c. In particular, non-blocking assignments defer updates until the end of the current timestep, meaning that all assignments operate on the same initial value of y, and only the final assignment takes effect. Finally, if the non-blocking assignments were replaced with blocking ones, the code would introduce a combinational feedback loop, which cannot stabilize.", + "bbox": [ + 73, + 372, + 491, + 551 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "These types of errors occur because the RTL code in prior datasets, including Origen [8], is synthetically generated by teacher LLMs such as Claude 3.5 and filtered only through syntax checks. Without simulation or test-based validation, semantic bugs that affect functional correctness remain undetected.", + "bbox": [ + 73, + 553, + 491, + 642 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We provide the natural language specification and the buggy RTL design to the teacher model GPT-4o-mini, prompting it to generate a unit test using the template shown in Figure 4a (further detailed in Section III-C). The resulting test is shown in Figure 3, which sets all three inputs to 1 and checks whether the output y evaluates to 1 as expected. When the buggy design (Figure 2b) is simulated with this test, it hangs and ultimately times out. 
The bug exemplifies a combinational loop. The always @* block is meant for combinational logic and its evaluation is triggered upon changes to any of the variables read inside the block. In this case, an evaluation of the block is triggered when either y, a, b, or c changes. However, y is both read (on the RHS) and written (on the LHS) in the same block. Upon evaluating the block, it schedules an update to y, which causes a change to y. This change retriggers the block, leading to another scheduled update to y, and so on. This loop continues indefinitely, preventing the simulation from converging.", + "bbox": [ + 73, + 643, + 491, + 914 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The corrected version replaces the non-blocking assignments with a single blocking assignment $(=)$ , ensuring that $y$ is updated", + "bbox": [ + 73, + 914, + 491, + 945 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1 Dataset Augmentation with a Teacher LLM" + ], + "code_body": "Input: Original dataset $D = \\{(s_i,d_i)\\}_{i = 1}^N$ $\\triangleright s_i$ : NL specification; $d_{i}$ : RTL design Maximum attempts $T$ \nDefine: GenTestTpl $\\leftarrow$ prompt template for test generation RefineTpl $\\leftarrow$ prompt template for iterative refinement \nOutput: Augmented dataset $D^{\\prime} = \\{(s_{i},d_{i},t_{i})\\}_{i = 1}^{M}$ $\\triangleright t_i$ : Generated unit test \n1: $D^{\\prime}\\gets \\emptyset$ \n2: for each $(s,d)\\in D$ do \n3: attempt $\\leftarrow 0$ success $\\leftarrow$ false \n4: while attempt $< T$ ∧ ¬success do \n5: attempt $\\leftarrow$ attempt + 1 \n6: if attempt $= = 1$ then \n7: d,t $\\leftarrow$ LLMInvoke(GenTestTpl,s,d) \n8: else \n9: d,t $\\leftarrow$ LLMInvoke(RefineTpl,s,d,t,err) \n10: success, err $\\leftarrow$ RunVerilogTest(d,t) \n11: if success then \n12: $D^{\\prime}\\gets D^{\\prime}\\cup \\{(s,d,t)\\}$ \n13: return $D^{\\prime}$", + "bbox": [ + 506, + 388, + 921, + 700 + ], + 
"page_idx": 4 + }, + { + "type": "text", + "text": "immediately with the result of a & b & c, as required by the specification. This version passes the test generated by the teacher model and behaves correctly under simulation.", + "bbox": [ + 503, + 726, + 919, + 771 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This example underscores the importance of functional validation in RTL datasets. Syntax checks alone cannot catch subtle but critical semantic errors. Our methodology, through teacher-driven test generation and iterative refinement, ensures that each design in the augmented dataset is not only syntactically valid but also functionally validated with unit tests.", + "bbox": [ + 501, + 772, + 921, + 862 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "C. Algorithm and Prompts", + "text_level": 1, + "bbox": [ + 504, + 880, + 689, + 895 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Algorithm 1 presents our automated pipeline for transforming an unvalidated RTL dataset into a functionally validated one. Starting from a dataset $D = \\{(s_i, d_i)\\}_{i=1}^N$ , where each", + "bbox": [ + 503, + 898, + 921, + 945 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 911, + 30, + 919, + 40 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "example consists of a natural language specification $s_i$ and a corresponding RTL design $d_i$ (e.g., from Origen [8]), the goal is to generate a unit test $t_i$ that validates the functional correctness of the design. If the design fails to pass the test, we invoke an iterative refinement loop that updates the design and test until it passes or a maximum number of attempts $T$ is reached. We set $T = 5$ in our experiments.", + "bbox": [ + 73, + 69, + 491, + 174 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The procedure is powered by a teacher model, GPT-4o-mini, which corresponds to the LLMInvoke calls in Algorithm 1. 
While stronger models such as GPT-4o or o3-mini may yield better performance, we use GPT-4o-mini in practice because of the large size of the dataset (217,462 examples in Origen) and the high cost associated with repeated API queries to OpenAI models.", + "bbox": [ + 73, + 174, + 491, + 280 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The process begins by prompting the teacher model with the test generation template (Figure 4a), together with a natural language specification and its initial RTL design (e.g., Figure 2a and Figure 2b). The model then produces a candidate unit test (e.g., Figure 3) designed to check whether the design satisfies the intended functionality under simulation.", + "bbox": [ + 73, + 280, + 491, + 371 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The design and test are compiled and simulated using standard Verilog tooling. If the test fails, for example due to a timeout, incorrect output, or another runtime error, we construct a refinement prompt (Figure 4b) that includes the specification, the failing design and test, and the simulation error message (corresponding to the err variable in Algorithm 1). This prompt is then passed to the teacher model, which attempts to fix the issue by making edits to the design, the test, or both.", + "bbox": [ + 73, + 371, + 491, + 492 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The refinement process repeats until the updated design passes simulation or the maximum number of attempts $T$ is reached. Once a design successfully passes its test, the validated triple $(s_i, d_i, t_i)$ is added to the output dataset $D'$ .", + "bbox": [ + 73, + 492, + 491, + 553 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "This strategy enables systematic detection and correction of subtle RTL bugs that cannot be identified through syntax checks alone. 
By integrating LLM-based test generation and iterative refinement into the dataset construction pipeline, we produce a dataset that is not only syntactically valid but also functionally validated through simulation.", + "bbox": [ + 73, + 553, + 491, + 643 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "While functional correctness under all possible inputs cannot be guaranteed, the inclusion of unit tests makes our augmented dataset substantially more robust than prior approaches that rely solely on syntactic checking. We view this as a practical and scalable step toward building higher-quality fine-tuning datasets for RTL generation. To assess quality, we manually reviewed 100 randomly sampled examples and found that $92\\%$ of the generated RTL code correctly matched the corresponding natural language descriptions.", + "bbox": [ + 73, + 643, + 491, + 781 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "IV. EXPERIMENTAL SETUP", + "text_level": 1, + "bbox": [ + 184, + 799, + 380, + 811 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "A. Dataset", + "text_level": 1, + "bbox": [ + 73, + 819, + 153, + 832 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Following the methodology described in Section III, we construct a fine-tuning dataset comprising 125,777 examples. Each example includes a natural language specification, a corresponding RTL design, and associated unit tests. Table II summarizes key statistics: the specifications contain an average of 247 words (ranging from 116 to 549), RTL implementations average 35 lines of code (ranging from 5 to 225), and unit", + "bbox": [ + 73, + 838, + 491, + 946 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/5c4b5c5dd73b0cbcb62a0a21938683d163731bae2003182c9845173afbe9b6cb.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
CategoryCountLength
MinMaxAvg
NL specification (words)116549247
Design (lines of RTL)125,777522535
Unit tests (lines of RTL)619755
", + "bbox": [ + 526, + 63, + 901, + 150 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "TABLE II: Dataset statistics: total number of examples and length distributions for natural language specifications, RTL implementations, and unit tests in the VeriCoder dataset.", + "bbox": [ + 503, + 155, + 923, + 200 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "tests average 55 lines (ranging from 6 to 197). We use the specification-solution pairs from this dataset to train our model, VeriCoder.", + "bbox": [ + 503, + 228, + 923, + 273 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "B. LoRA Fine-Tuning Setup", + "text_level": 1, + "bbox": [ + 504, + 295, + 697, + 310 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Following standard practices for LLM fine-tuning, we fine-tune the base model of Qwen2.5-14B-Instruct using Low-Rank Adaptation (LoRA, described in Section II-A), with a rank of 16 and a scaling factor of 32 to all linear projection layers in the transformer. Training is conducted over 3 epochs with a batch size of 40. We adopt a constant learning rate of $1 \\times 10^{-5}$ , paired with a linear decay scheduler and a warm-up ratio of 0.05. The optimizer is used with a weight decay of $1 \\times 10^{-4}$ and gradient clipping is applied with a maximum norm of 1.", + "bbox": [ + 501, + 313, + 923, + 450 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "C. Benchmarks and Metrics", + "text_level": 1, + "bbox": [ + 504, + 470, + 700, + 484 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Following the evaluation protocol established in prior work [7], [8], we benchmark against VerilogEval [9] and RTLLM [10]. For VerilogEval, we report the standard Pass@k metric with $k \\in \\{1,5,10\\}$ , which estimates the expected probability that at least one of the top- $k$ generated programs passes all test cases. 
The metric is defined as:", + "bbox": [ + 501, + 489, + 921, + 580 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {P a s s} @ k = \\mathbb {E} \\left[ 1 - \\frac {\\binom {n - c} {k}}{\\binom {n} {k}} \\right]\n$$\n", + "text_format": "latex", + "bbox": [ + 619, + 585, + 802, + 628 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $n$ is the total number of generated programs and $c$ is the number of correct ones. All test cases are manually created by experts who design the benchmarks. In all evaluations, we set $n = 10$ . For RTLLM, we report both syntax correctness and functional correctness using Pass@5. This evaluation setup aligns with that used in prior work [8].", + "bbox": [ + 501, + 632, + 921, + 724 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "D. Models for Evaluation", + "text_level": 1, + "bbox": [ + 504, + 744, + 684, + 758 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We evaluate two groups of models. The first group consists of pretrained-only base models, including OpenAI's latest releases (o4-mini, o3-mini, GPT-4o, GPT-4o-mini), Google's Gemini 2.0 Flash, DeepSeek's R1 and DeepSeek-Coder-7B-v1.5 (the base model used in prior work [8]), Meta's LLaMA2-7B model, and Alibaba's Qwen2.5-14B-Instruct (our base model for fine-tuning). The second group includes fine-tuned models with released weights from prior work: Origen [8], RTLCoder [6], and ChipGPT [27].", + "bbox": [ + 501, + 762, + 921, + 898 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To ensure a fair comparison, we use identical input prompts and post-processing scripts across all models. 
For models released by prior work, we do not adopt their model-specific", + "bbox": [ + 503, + 898, + 921, + 946 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 911, + 31, + 919, + 39 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/1c236d229de3d510d104e28dbad8d6ae03c635914ee6ce1bf2d818846dae0d09.jpg", + "table_caption": [], + "table_footnote": [ + "TABLE III: RTL code generation performance across models. To ensure a fair comparison, we use the same input prompts and apply identical post-processing scripts, running inference with model weights released by prior work." + ], + "table_body": "
Model TypeEvaluated ModelVerilogEval V1.0 [9] \n(using pass@k metric)RTLLM V1.1 [10] \n(using pass@5 metric)
Eval-Machine (%)Eval-Human (%)Syntax-VCS (%)Functional (%)
k=1k=5k=10k=1k=5k=10
Base Modelso4-mini-2025-04-1661.967.868.664.366.467.186.272.4
GPT-4o-2024-11-2063.766.567.154.360.462.2100.069.0
GPT-4o-mini-2024-07-1855.762.464.344.751.655.189.765.5
DeepSeek-R165.770.972.062.869.169.979.358.6
o3-mini-2025-01-3166.471.672.062.068.969.969.055.2
Qwen2.5-14B-Instruct47.854.255.235.340.042.369.041.4
Gemini-2.0-flash-00160.362.663.652.157.659.065.534.5
DeepSeek-R1-Distill-Qwen-14B46.264.168.536.751.755.162.134.5
DeepSeek-Coder-7B-v1.544.458.962.925.840.244.948.324.1
LLaMA-2-7B7.015.618.90.42.13.83.40.0
Fine-Tuned Models \n(Prior Work)OriGen [8]35.965.168.522.347.551.951.737.9
RTLCoder-DeepSeek [6]22.051.457.314.735.242.317.210.3
RTLCoder-Mistral [6]17.646.456.612.431.536.53.40.0
ChipGPT-LLaMA3.1-8B-SFT [27]17.646.456.612.431.536.513.80.0
ChipGPT-LLaMA2-SFT-7B [27]0.94.27.70.62.23.86.90.0
Our WorkVeriCoder55.762.964.338.349.251.979.348.3
", + "bbox": [ + 81, + 63, + 915, + 351 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "prompts [8] or inference pipelines [6], [27]. Instead, we apply a uniform evaluation script, with the only variable being the model under test. This standardization is critical, as both input formatting and post-processing can significantly affect performance. By controlling these factors, we isolate model capability and enable a fair comparison.", + "bbox": [ + 73, + 415, + 491, + 506 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "V. RESULTS", + "text_level": 1, + "bbox": [ + 236, + 525, + 328, + 539 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "A. Main Evaluation Results", + "text_level": 1, + "bbox": [ + 73, + 546, + 267, + 559 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table III shows the results. Our major findings are as follows:", + "bbox": [ + 89, + 566, + 491, + 580 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "a) Comparison with prior work: VeriCoder achieves state-of-the-art results across two RTL code generation benchmarks, outperforming all previously released open-source finetuned models. On VerilogEval-Machine, VeriCoder attains a pass@1 accuracy of $55.7\\%$ , representing a 19.8 percentage point improvement over the best prior model, OriGen. On VerilogEval-Human, it reaches $38.3\\%$ , exceeding OriGen by 16.0 percentage points. Across all evaluated $k$ -shot settings $(k = 1, 5, 10)$ , VeriCoder consistently maintains its lead on the Human split. On the RTLLM benchmark, VeriCoder achieves $79.3\\%$ syntax correctness and $48.3\\%$ functional correctness, surpassing OriGen's $51.7\\%$ and $37.9\\%$ , respectively. 
In conclusion, VeriCoder delivers relative improvements of up to $71.7\\%$ on VerilogEval and $27.4\\%$ on RTLLM in pass@k accuracy, surpassing the previous state-of-the-art model on both benchmarks.", + "bbox": [ + 73, + 582, + 491, + 821 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To better understand the relatively low performance of ChipGPT [27], we examined its outputs in detail. We observed that its generated RTL designs often include module headers that deviate from the given specifications, revealing difficulty in precise instruction following. Moreover, its base model, LLaMA2-7B, performs even worse, suggesting that limitations in the instruction-following capabilities of the underlying pretrained model constrain the effectiveness of the fine-tuned", + "bbox": [ + 73, + 823, + 491, + 946 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "variant. For a fair comparison, we do not apply any of the model-specific customized post-processing scripts that attempt to fix syntax or header issues. Instead, we use a standardized evaluation script for all models, extracting Verilog code as-is to ensure consistency.", + "bbox": [ + 501, + 415, + 921, + 492 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "b) Effectiveness of our fine-tuning: Starting from Qwen-2.5-14B-Instruct as our base model, VeriCoder delivers substantial gains across VerilogEval. On the VerilogEvalMachine split, pass@1 jumps up by $7.6\\%$ , pass@5 by $4.0\\%$ , and pass@10 by $2.1\\%$ , and VerilogEval-Human reflects the same trend. On RTLLM, functional pass@5 is $7\\%$ higher than its base model. Specifically, VeriCoder even marginally outperforms one of the commercial models, Google's Gemini2.0-flash, on pass@5 and pass@10 metrics of Eval-Machine as well as on RTLLM. 
Together, these results demonstrate that our fine-tuning process and our validated dataset significantly boost pass@k metrics and semantic correctness in RTL generation.", + "bbox": [ + 501, + 513, + 921, + 696 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "c) Model gap remains: Despite the observed improvements, a substantial performance gap persists between VeriCoder and the strongest large models. For instance, o3-mini attains $66.4\\%$ on VerilogEval Pass@1 compared to VeriCoder's $55.7\\%$ . DeepSeek-R1 achieves $69.1\\%$ on human-graded Pass@5, versus VeriCoder's $49.2\\%$ . Commercial LLMs such as GPT-4o reach a perfect $100.0\\%$ Syntax-VCS validity and $69.0\\%$ functional correctness, while VeriCoder records $79.3\\%$ and $48.3\\%$ , respectively. Despite the performance gap, open-source lightweight models offer compelling advantages. They provide transparency, allow for local deployment, and ensure intellectual property protection, i.e., capabilities that are particularly important for RTL design workflows where security, customizability, and integration into existing toolchains are critical.", + "bbox": [ + 501, + 717, + 921, + 944 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 911, + 30, + 919, + 39 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/05504b8cfde9e73c710b4509186e765d80b7e2614e9c4341ab0ec20f2649812f.jpg", + "table_caption": [], + "table_footnote": [ + "TABLE IV: We performed fine-tuning on the same base model using a functionally validated dataset and the functionally unvalidated dataset [8]. We report Pass@5 metrics for all models on two benchmarks." + ], + "table_body": "
ModelVerilogEval [9] (Pass@5)RTLLM [10] (Pass@5)
SyntaxFunc
Qwen2.5-14B-Instruct (base)46.869.041.4
Qwen w/ unvalidated data53.575.944.8
Qwen w/ validated data55.879.348.3
", + "bbox": [ + 76, + 63, + 496, + 162 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "B. Ablation Study of Dataset", + "text_level": 1, + "bbox": [ + 78, + 260, + 274, + 273 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To assess the impact of dataset quality on RTL code generation, we conduct an ablation study using the same base model, Qwen2.5-14B-Instruct, fine-tuned on two datasets: (1) the unvalidated OriGen dataset from prior work [8], and (2) our newly curated, functionally validated dataset. All factors, including dataset size, fine-tuning hyperparameters, training procedures, and evaluation settings, are held constant to ensure a fair comparison.", + "bbox": [ + 76, + 282, + 490, + 402 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Across all metrics, we observe a consistent improvement as dataset quality increases. On the VerilogEval benchmark (covering both Machine and Human subsets), the base model achieves $46.8\\%$ Pass@5. Fine-tuning on the unvalidated dataset raises performance to $53.5\\%$ , while our validated dataset further improves it to $55.8\\%$ . For RTLLM syntax correctness, the trend is similar: $69.0\\%$ for the base model, $75.9\\%$ for the unvalidated version, and $79.3\\%$ when trained on validated data. Functional correctness sees even more significant improvement, rising from $41.4\\%$ (base) to $44.8\\%$ (unvalidated) and ultimately to $48.3\\%$ (validated).", + "bbox": [ + 76, + 405, + 488, + 569 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "These results demonstrate that functionally validated data provides more effective supervision than existing unvalidated data. This also underscores the importance of dataset quality in fine-tuning LLMs for RTL code generation.", + "bbox": [ + 76, + 571, + 488, + 632 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "C. 
Test Passing Rates of Non-Validated Datasets", + "text_level": 1, + "bbox": [ + 78, + 664, + 406, + 676 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We examine the quality of fine-tuning datasets released by prior work by evaluating their passing rates against our synthetic unit tests generated by the teacher model GPT-4o-mini. For each corpus, we randomly sample 1,000 Verilog implementations and apply the test generation and refinement pipeline described in Section III. We then run corresponding unit tests against the original design and measure the proportion of the original designs that successfully pass the generated tests. As shown in Table V, only $24.4\\%$ examples of the RTLCoder dataset [6] pass our functional tests, while OriGen [8] reaches $53.5\\%$ .", + "bbox": [ + 76, + 686, + 490, + 851 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "OriGen's higher pass rate aligns with its stronger code generation results in Table III, hinting at a positive link between dataset validity and downstream performance. These findings highlight the potential value of incorporating functional correctness validation into fine-tuning dataset curation for better RTL code generation.", + "bbox": [ + 76, + 854, + 488, + 944 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/78eb9c01e8aec3197f5771bc2618afcf24170b28ee21cbb3c8fa98cb91a33acc.jpg", + "table_caption": [], + "table_footnote": [ + "TABLE V: Test passing rates $(\\%)$ of datasets released by prior work on a randomly sampled set of 1000 examples." + ], + "table_body": "
Prior Datasets# Sampled ExamplesTest Passing (%)
RTLCoder [6]100024.4
OriGen [8]100053.5
", + "bbox": [ + 517, + 63, + 910, + 118 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "VI. DISCUSSION AND FUTURE WORK", + "text_level": 1, + "bbox": [ + 581, + 183, + 846, + 195 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "While VeriCoder, combining unit test generation with feedback-driven refinement, improves the functional correctness of generated RTL code, it does not fully guarantee correctness. Synthetic test cases may fail to capture all possible edge cases. To address this challenge, future work should explore integrating formal verification techniques into the dataset construction pipeline to rigorously ensure the correctness of the generated code. Recent advancements have demonstrated promising results in translating natural language instructions into formal specifications [16], [38], as well as enforcing formal constraints during LLM-based code generation [39].", + "bbox": [ + 508, + 200, + 919, + 364 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Moreover, most existing approaches, including VeriCoder, focus on small-scale RTL generation. However, practical hardware development often involves large, repository-level codebases with intricate cross-file dependencies and requirements for long-range context [40]–[42]. Recent work has begun to address these challenges through techniques such as combining fine-tuning with retrieval-augmented RTL code generation [43], [44]. Extending VeriCoder's unit test generation and feedback-directed refinement components to the repository scale will enable LLMs to handle more real-world RTL tasks.", + "bbox": [ + 508, + 366, + 919, + 516 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Furthermore, reinforcement learning (RL) offers a powerful framework for further optimizing large language models' performance beyond what is achievable through supervised fine-tuning alone. 
Recent studies have demonstrated the effectiveness of RL in enhancing LLM-based code generation by incorporating diverse forms of feedback, such as test case outcomes, compiler diagnostics, and formal verification results [32], [45], [46]. Building on this progress, future work could investigate applying RL techniques to the VeriCoder dataset, using the accompanying test cases as a feedback signal to iteratively improve RTL code generation quality.", + "bbox": [ + 508, + 517, + 919, + 681 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "VII. CONCLUSION", + "text_level": 1, + "bbox": [ + 648, + 700, + 779, + 712 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Recent advances in Large Language Models (LLMs) have opened new possibilities for Electronic Design Automation (EDA), particularly in RTL code generation. However, most existing datasets emphasize syntactic validity while overlooking functional correctness, which limits the effectiveness of finetuned models. We introduce VERICODER, a model fine-tuned on a dataset with 125,000 examples that is validated for functional correctness. This dataset is constructed using a feedback-directed refinement pipeline guided by a teacher LLM, which generates and iteratively updates both RTL designs and unit tests until the design passes simulation. The resulting dataset consists of functionally validated triples comprising a natural language specification, an RTL implementation, and a passing test. Fine-tuned on this dataset, VERICODER achieves state-of-the-art results on two established RTL benchmarks,", + "bbox": [ + 508, + 718, + 919, + 943 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 911, + 31, + 919, + 39 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "yielding relative improvements of up to $71.7\\%$ on VerilogEval and $27.4\\%$ on RTLLM. 
An ablation study confirms the impact of functional validation on model performance, underscoring the importance of high-quality training data. Future work may explore formal verification and reinforcement learning to further advance AI-assisted hardware design.", + "bbox": [ + 73, + 69, + 491, + 161 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENT", + "text_level": 1, + "bbox": [ + 210, + 184, + 357, + 196 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We thank Samantha Archer, Yao Hsiao, Mohammad Rahmani Fadiheh and Subhasish Mitra for their discussions. This work was partially supported by a Google Research Award.", + "bbox": [ + 73, + 207, + 493, + 253 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 235, + 277, + 331, + 291 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] M. Liu, T.-D. Ene, R. Kirby, C. Cheng, N. Pinckney, R. Liang, J. Alben, H. Anand, S. Banerjee, I. Bayraktaroglu et al., \"Chipnemo: Domain-adapted llms for chip design,\" arXiv preprint arXiv:2311.00176, 2023.", + "[2] L. Chen, Y. Chen, Z. Chu, W. Fang, T.-Y. Ho, R. Huang, Y. Huang, S. Khan, M. Li, X. Li et al., \"The dawn of ai-native eda: Opportunities and challenges of large circuit models,\" arXiv preprint arXiv:2403.07257, 2024.", + "[3] R. Zhong, X. Du, S. Kai, Z. Tang, S. Xu, H.-L. Zhen, J. Hao, Q. Xu, M. Yuan, and J. Yan, \"Llm4eda: Emerging progress in large language models for electronic design automation,\" arXiv preprint arXiv:2401.12224, 2023.", + "[4] Z. He and B. Yu, “Large language models for eda: Future or mirage?” in Proceedings of the 2024 International Symposium on Physical Design, 2024, pp. 65–66.", + "[5] X. Yao, Y. Wang, X. Li, Y. Lian, R. Chen, L. Chen, M. Yuan, H. Xu, and B. 
Yu, \"Rtlwriter: Methodologies for large models aided rtl code optimization,\" in Proceedings of the 43rd IEEE/ACM International Conference on Computer-Aided Design, 2024, pp. 1-7.", + "[6] S. Liu, W. Fang, Y. Lu, J. Wang, Q. Zhang, H. Zhang, and Z. Xie, \"Rtlcoder: Fully open-source and efficient ltm-assisted rtl code generation technique,\" IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, 2024.", + "[7] S. Liu, W. Fang, Y. Lu, Q. Zhang, H. Zhang, and Z. Xie, \"Rtlcoder: Outperforming gpt-3.5 in design rtl generation with our open-source dataset and lightweight solution,\" in 2024 IEEE LLM Aided Design Workshop (LAD). IEEE, 2024, pp. 1-5.", + "[8] F. Cui, C. Yin, K. Zhou, Y. Xiao, G. Sun, Q. Xu, Q. Guo, D. Song, D. Lin, X. Zhang et al., \"Origen: Enhancing rtl code generation with code-to-code augmentation and self-reflection,\" arXiv preprint arXiv:2407.16237, 2024.", + "[9] M. Liu, N. Pinckney, B. Khailany, and H. Ren, \"Veriloggeval: Evaluating large language models for verilog code generation,\" in 2023 IEEE/ACM International Conference on Computer Aided Design (ICCAD). IEEE, 2023, pp. 1-8.", + "[10] Y. Lu, S. Liu, Q. Zhang, and Z. Xie, \"Rtllm: An open-source benchmark for design rtl generation with large language model,\" in 2024 29th Asia and South Pacific Design Automation Conference (ASP-DAC). IEEE, 2024, pp. 722-727.", + "[11] Y. Tsai, M. Liu, and H. Ren, \"Rtlfixer: Automatically fixing rtI syntax errors with large language model,\" in Proceedings of the 61st ACM/IEEE Design Automation Conference, 2024, pp. 1-6.", + "[12] Y. Liao, T. Adegbija, and R. Lysecky, \"Are llms any good for high-level synthesis?\" in Proceedings of the 43rd IEEE/ACM International Conference on Computer-Aided Design, 2024, pp. 1-8.", + "[13] Y. Fu, Y. Zhang, Z. Yu, S. Li, Z. Ye, C. Li, C. Wan, and Y. C. 
Lin, \"Gpt4aigchip: Towards next-generation ai accelerator design automation via large language models,\" in 2023 IEEE/ACM International Conference on Computer Aided Design (ICCAD). IEEE, 2023, pp. 1-9.", + "[14] Z. Yan, Y. Qin, X. S. Hu, and Y. Shi, \"On the viability of using llms for sw/hw co-design: An example in designing cim dnn accelerators,\" in 2023 IEEE 36th International System-on-Chip Conference (SOCC). IEEE, 2023, pp. 1-6.", + "[15] Z. Liang, J. Cheng, R. Yang, H. Ren, Z. Song, D. Wu, X. Qian, T. Li, and Y. Shi, \"Unleashing the potential of llms for quantum computing: A study in quantum architecture design,\" arXiv preprint arXiv:2307.08191, 2023." + ], + "bbox": [ + 76, + 301, + 491, + 943 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[16] M. Cosler, C. Hahn, D. Mendoza, F. Schmitt, and C. Trippel, \"nl2spec: Interactively translating unstructured natural language to temporal logics with large language models,\" in International Conference on Computer Aided Verification. Springer, 2023, pp. 383-396.", + "[17] C. Sun, C. Hahn, and C. Trippel, \"Towards improving verification productivity with circuit-aware translation of natural language to systemverilog assertions,\" in First International Workshop on Deep Learning-aided Verification, 2023.", + "[18] H. Wu, Z. He, X. Zhang, X. Yao, S. Zheng, H. Zheng, and B. Yu, \"Chateda: A large language model powered autonomous agent for eda,\" IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, 2024.", + "[19] Z. Xiao, X. He, H. Wu, B. Yu, and Y. Guo, \"Eda-copilot: A ragpowered intelligent assistant for eda tools,\" ACM Transactions on Design Automation of Electronic Systems, 2025.", + "[20] K. Xu, J. Sun, Y. Hu, X. Fang, W. Shan, X. Wang, and Z. Jiang, \"Meic: Re-thinking rtl debug automation using llms,\" in Proceedings of the 43rd IEEE/ACM International Conference on Computer-Aided Design, 2024, pp. 1-9.", + "[21] R. Li, L. B. 
Allal, Y. Zi, N. Muennighoff, D. Kocetkov, C. Mou, M. Marone, C. Akiki, J. Li, J. Chim et al., \"Starcoder: may the source be with you!\" arXiv preprint arXiv:2305.06161, 2023.", + "[22] A. Lozhkov, R. Li, L. B. Allal, F. Cassano, J. Lamy-Poirier, N. Tazi, A. Tang, D. Pykhtar, J. Liu, Y. Wei et al., \"Starcoder 2 and the stack v2: The next generation,\" arXiv preprint arXiv:2402.19173, 2024.", + "[23] E. Dehaerne, B. Dey, S. Halder, and S. De Gendt, “A deep learning framework for verilog autocompletion towards design and verification automation,” arXiv preprint arXiv:2304.13840, 2023.", + "[24] Z. Pei, H.-L. Zhen, M. Yuan, Y. Huang, and B. Yu, \"Betterv: Controlled verilog generation with discriminative guidance,\" arXiv preprint arXiv:2402.03375, 2024.", + "[25] S. Thakur, B. Ahmad, Z. Fan, H. Pearce, B. Tan, R. Karri, B. Dolan-Gavitt, and S. Garg, \"Benchmarking large language models for automated verilog RTL code generation,\" in 2023 Design, Automation & Test in Europe Conference & Exhibition (DATE). IEEE, 2023, pp. 1-6.", + "[26] S. Thakur, B. Ahmad, H. Pearce, B. Tan, B. Dolan-Gavitt, R. Karri, and S. Garg, \"Verigen: A large language model for verilog code generation,\" ACM Transactions on Design Automation of Electronic Systems, vol. 29, no. 3, pp. 1-31, 2024.", + "[27] K. Chang, K. Wang, N. Yang, Y. Wang, D. Jin, W. Zhu, Z. Chen, C. Li, H. Yan, Y. Zhou et al., \"Data is all you need: Finetuning llms for chip design via an automated design-data augmentation framework,\" in Proceedings of the 61st ACM/IEEE Design Automation Conference, 2024, pp. 1-6.", + "[28] E. J. Hu, Y. Shen, P. Wallis, Z. Allen-Zhu, Y. Li, S. Wang, L. Wang, W. Chen et al., “Lora: Low-rank adaptation of large language models.” *ICLR*, vol. 1, no. 2, p. 3, 2022.", + "[29] M. Liu, Y.-D. Tsai, W. Zhou, and H. Ren, \"Craftrtl: High-quality synthetic data generation for verilog code models with correct-by-construction non-textual representations and targeted code repair,\" ArXiv, vol. 
abs/2409.12993, 2024. [Online]. Available: https://api_semanticscholar.org/CorpusID:272770433", + "[30] Y. Zhang, Z. Yu, Y. Fu, C. Wan, and Y. C. Lin, \"Mg-verilog: Multi-grained dataset towards enhanced llm-assisted verilog generation,\" in 2024 IEEE LLM Aided Design Workshop (LAD). IEEE, 2024, pp. 1-5.", + "[31] E. Goh, M. Xiang, I. Wey, T. H. Teo et al., “From english to asi: Hardware implementation with large language model,” arXiv preprint arXiv:2403.07039, 2024.", + "[32] S. Liu, Y. Lu, W. Fang, M. Li, and Z. Xie, \"Openllm-rtl: Open dataset and benchmark for llm-aided design rtl generation,\" in Proceedings of the 43rd IEEE/ACM International Conference on Computer-Aided Design, 2024, pp. 1-9.", + "[33] C. Deng, Y.-D. Tsai, G.-T. Liu, Z. Yu, and H. Ren, \"Scalertl: Scaling llms with reasoning data and test-time compute for accurate rtl code generation,\" ArXiv, vol. abs/2506.05566, 2025. [Online]. Available: https://api-semanticscholar.org/CorpusID:279243692", + "[34] Y. Liu, C. Xu, Y. Zhou, Z. Li, and Q. Xu, \"Deeprl: Bridging verilog understanding and generation with a unified representation model,\" ArXiv, vol. abs/2502.15832, 2025. [Online]. Available: https://api-semanticscholar.org/CorpusID:276574886", + "[35] N. Wang, B. Yao, J. Zhou, X. Wang, Z. Jiang, and N. Guan, \"Large language model for verilog generation with golden code feedback,\" ArXiv, vol. abs/2407.18271, 2024. [Online]. Available: https://api_semanticscholar.org/CorpusID:271516462", + "[36] M. Gao, J. Zhao, Z. Lin, W. Ding, X. Hou, Y. Feng, C. Li, and M. Guo, \"Autovcoder: A systematic framework for automated verilog code generation using llms,\" 2024 IEEE 42nd International Conference" + ], + "bbox": [ + 506, + 70, + 921, + 943 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 911, + 30, + 919, + 39 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "on Computer Design (ICCD), pp. 162-169, 2024. [Online]. 
Available: https://api(semanticscholar.org/CorpusID:271516210", + "[37] Y. Zhao, D. Huang, C. Li, P. Jin, Z. Nan, T. Ma, L. Qi, Y. Pan, Z. Zhang, R. Zhang, X. Zhang, Z. Du, Q. Guo, X. Hu, and Y. Chen, \"Codev: Empowering llms withhdl generation through multi-level summarization,\" 2024. [Online]. Available: https://api_semanticscholar.org/CorpusID:271212791", + "[38] D. Mendoza, C. Hahn, and C. Trippel, \"Translating natural language to temporal logics with large language models and model checkers,\" in 2024 Formal Methods in Computer-Aided Design (FMCAD), 2024, pp. 1-11.", + "[39] P. Aggarwal, B. Parno, and S. Welleck, \"Alphaverus: Bootstrapping formally verified code generation through self-improving translation and treefinement,\" arXiv preprint arXiv:2412.06176, 2024.", + "[40] C. E. Jimenez, J. Yang, A. Wettig, S. Yao, K. Pei, O. Press, and K. Narasimhan, \"Swe-bench: Can language models resolve real-world github issues?\" arXiv preprint arXiv:2310.06770, 2023.", + "[41] T. Suresh, R. G. Reddy, Y. Xu, Z. Nussbaum, A. Mulyar, B. Duderstadt, and H. Ji, \"Cornstack: High-quality contrastive data for better code retrieval and reranking,\" in The Thirteenth International Conference on Learning Representations, 2025.", + "[42] N. Jain, M. Shetty, T. Zhang, K. Han, K. Sen, and I. Stoica, “R2e: Turning any github repository into a programming agent environment,” in ICML, 2024.", + "[43] P. Wu, N. Guo, J. Lv, X. Xiao, and X. Ye, \"RtlrepEncoder: Repository-level rtl code completion through the combination of fine-tuning and retrieval augmentation,\" arXiv preprint arXiv:2504.08862, 2025.", + "[44] Z. Li, C. Xu, Z. Shi, Z. Peng, Y. Liu, Y. Zhou, L. Zhou, C. Ma, J. Zhong, X. Wang et al., \"Deepcircuits: A comprehensive repository-level dataset for rtl code understanding, generation, and ppa analysis,\" arXiv preprint arXiv:2502.18297, 2025.", + "[45] N. Wang, B. Yao, J. Zhou, X. Wang, Z. Jiang, and N. 
Guan, “Large language model for verilog generation with golden code feedback,” arXiv preprint arXiv:2407.18271, 2024.", + "[46] J. Wang, Z. Zhang, Y. He, Y. Song, T. Shi, Y. Li, H. Xu, K. Wu, G. Qian, Q. Chen et al., “Enhancing code llms with reinforcement learning in code generation,” arXiv preprint arXiv:2412.20367, 2024." + ], + "bbox": [ + 76, + 71, + 491, + 489 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 906, + 31, + 919, + 40 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15659/c7f88426-e408-4745-97f1-882178397313_model.json b/data/2025/2504_15xxx/2504.15659/c7f88426-e408-4745-97f1-882178397313_model.json new file mode 100644 index 0000000000000000000000000000000000000000..e8fcf1b1899d5fa30b4106e252f10aedab170662 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15659/c7f88426-e408-4745-97f1-882178397313_model.json @@ -0,0 +1,2486 @@ +[ + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "1" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.275, + 0.06, + 0.725 + ], + "angle": 270, + "content": "arXiv:2504.15659v2 [cs.AR] 24 Aug 2025" + }, + { + "type": "title", + "bbox": [ + 0.1, + 0.071, + 0.898, + 0.131 + ], + "angle": 0, + "content": "VeriCoder: Enhancing LLM-Based RTL Code Generation through Functional Correctness Validation" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.143, + 0.786, + 0.177 + ], + "angle": 0, + "content": "Anjiang Wei \\(①\\), Huanmi Tan \\(①\\), Tarun Suresh \\(①\\), Daniel Mendoza \\(①\\), Thiago S. F. X. 
Teixeira \\(①\\), Ke Wang \\(①\\), Caroline Trippel \\(①\\), and Alex Aiken \\(①\\)" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.221, + 0.493, + 0.576 + ], + "angle": 0, + "content": "Abstract—Recent advances in Large Language Models (LLMs) have sparked growing interest in applying them to Electronic Design Automation (EDA) tasks, particularly Register Transfer Level (RTL) code generation. While several RTL datasets have been introduced, most focus on syntactic validity rather than functional validation with tests, leading to training examples that compile but may not implement the intended behavior. We present VERICODER, a model for RTL code generation fine-tuned on a dataset validated for functional correctness. This fine-tuning dataset is constructed using a novel methodology that combines unit test generation with feedback-directed refinement. Given a natural language specification and an initial RTL design, we prompt a teacher model (GPT-4o-mini) to generate unit tests and iteratively revise the RTL design based on its simulation results using the generated tests. If necessary, the teacher model also updates the tests to ensure they comply with the natural language specification. As a result of this process, every example in our dataset is functionally validated, consisting of a natural language description, an RTL implementation, and passing tests. Fine-tuned on this dataset of 125,777 examples, VERICODER achieves state-of-the-art metrics in functional correctness on VerilogEval and RTLLM, with relative gains of up to \\(71.7\\%\\) and \\(27.4\\%\\), respectively. An ablation study further shows that models trained on our functionally validated dataset outperform those trained on functionally non-validated datasets, underscoring the importance of high-quality datasets in RTL code generation. 
Our code, data, and models are publicly available at https://github.com/Anjiang-Wei/VeriCoder" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.581, + 0.493, + 0.595 + ], + "angle": 0, + "content": "Index Terms—RTL, Code Generation, Large Language Model." + }, + { + "type": "title", + "bbox": [ + 0.217, + 0.631, + 0.352, + 0.644 + ], + "angle": 0, + "content": "I. INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.651, + 0.493, + 0.817 + ], + "angle": 0, + "content": "Large Language Models (LLMs) have demonstrated remarkable performance across natural language processing tasks, spurring growing interest in applying their capabilities to a broad range of Electronic Design Automation (EDA) problems [1]–[4]. Recent efforts explore LLMs for code generation [5]–[12], architecture design [13]–[15], verification [16], [17], tool assistance [18], [19], and debugging [1], [20]. In this work, we focus on generating Register Transfer Level (RTL) code from natural language specifications. Automating RTL code generation has the potential to significantly boost hardware design productivity and reduce the manual effort" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.83, + 0.493, + 0.865 + ], + "angle": 0, + "content": "Anjiang Wei, Daniel Mendoza, Caroline Trippel, and Alex Aiken are affiliated with Stanford University (e-mail: anjiang@cs.stanford.edu; dmendo@stanford.edu; trippel@stanford.edu; aiken@cs.stanford.edu)." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.865, + 0.493, + 0.887 + ], + "angle": 0, + "content": "Huanmi Tan is affiliated with Carnegie Mellon University (e-mail: huanmi.tan@gmail.com)." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.887, + 0.493, + 0.91 + ], + "angle": 0, + "content": "Tarun Suresh is affiliated with University of Illinois Urbana-Champaign (e-mail: tsuresh3@illinois.edu)." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.91, + 0.493, + 0.933 + ], + "angle": 0, + "content": "Thiago S. F. X. 
Teixeira is with Intel Corporation (e-mail: thiago.teixeira@intel.com)." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.933, + 0.436, + 0.945 + ], + "angle": 0, + "content": "Ke Wang is with Nanjing University (e-mail: kwg@nju.edu.cn)." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.22, + 0.922, + 0.25 + ], + "angle": 0, + "content": "involved in complex design tasks, making it a timely and impactful area of research." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.251, + 0.924, + 0.476 + ], + "angle": 0, + "content": "Developing open-source, lightweight models for RTL code generation is essential for advancing both research and deployment. Proprietary models such as GPT-4o and Claude 3.7 restrict customization and lack transparency, making them unsuitable for in-depth analysis and academic exploration. They also raise privacy and security concerns, especially when handling RTL designs that may contain sensitive intellectual property. In contrast, lightweight models that can run locally offer a secure, privacy-preserving alternative—enabling hardware engineers to integrate AI directly into their design workflows. However, existing open-source models still underperform on RTL tasks, largely due to the absence of high-quality, functionally validated RTL datasets in their training corpora [21], [22]. While training algorithms are readily available, progress is bottlenecked by the lack of open datasets with functional correctness validation." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.478, + 0.924, + 0.673 + ], + "angle": 0, + "content": "A key challenge in building such datasets lies in constructing large-scale, high-quality training data that pairs natural language specifications with RTL implementations. Despite efforts to mine RTL code from open-source repositories [23]–[26], much of the collected data lacks validation and may not align with its intended functionality. 
To address this, recent work has turned to LLMs—either prompting them to synthesize RTL designs from keyword-based specifications [6], [7] or leveraging them to rewrite existing RTL code and generate matching specifications [8], [24], [26]. In both cases, syntax checkers are often employed to filter uncompilable code or provide feedback for iterative refinement, but these techniques still fall short of validating functional correctness." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.673, + 0.924, + 0.899 + ], + "angle": 0, + "content": "As far as we know, all these prior work [6]–[8], [24], [26] have focused solely on ensuring syntactic correctness, overlooking functional correctness. As a result, many dataset examples compile successfully but may not implement the behavior described in their natural language specifications. The distinction between syntactic correctness and functional correctness has important implications for model evaluation and real-world deployment. While functionally correct code inherently satisfies syntax constraints, syntactic correctness alone does not guarantee correct functionality. This gap is evident in the results reported by the RTLLM benchmark [10], where GPT-4o attains a high syntax accuracy of \\(100.0\\%\\), yet achieve only \\(69.0\\%\\) in terms of functional correctness. Ultimately, in real-world settings, it is functional correctness rather than syntactic validity that truly matters." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.9, + 0.922, + 0.945 + ], + "angle": 0, + "content": "In this work, we introduce VeriCoder, a model for RTL code generation fine-tuned on a high-quality dataset consisting of 125,777 examples that has been validated for functional" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.032, + 0.921, + 0.041 + ], + "angle": 0, + "content": "2" + }, + { + "type": "table", + "bbox": [ + 0.077, + 0.064, + 0.924, + 0.306 + ], + "angle": 0, + "content": "
Prior WorkStrategyDescriptionSyntax CheckerUnit Tests
RTLCoder [7]Keyword-based Generation, MutationPrompt LLM with keywords and existing code, followed by iterative mutation to get instruction-code pairs.X
OriGen [8]Code-to-Code, Syntax Error CorrectionApplies LLM-driven code-to-code pipeline on existing RTL code and filters them by compiler error feedback.X
BetterV [24]Web Scraping & Cleaning, Alignment with CLarge-scale web-collected Verilog, cleaned and filtered to enforce coding standards; aligns C with Verilog.X
VeriGen [26]Manually Collect Textbook and Open-Source CodeMines real-world RTL from GitHub and textbooks, manually cleans and organizes them into a structured dataset.X
ChipGPT [27]AST-based SynthesisConverts Verilog ASTs into natural-language prompts and injects semantic error variants via EDA-tool feedback.X
VeriCoder (Our Work)Feedback-Directed Refinement, Simulation, Unit Test GenerationIteratively generate unit tests with a teacher LLM, check implementations via compiler and simulator, and refining designs and tests until each design passes.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.234, + 0.311, + 0.761, + 0.326 + ], + "angle": 0, + "content": "TABLE I: Comparison of Verilog fine-tuning dataset construction approaches." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.353, + 0.492, + 0.549 + ], + "angle": 0, + "content": "correctness1. To construct this dataset, we develop a novel pipeline that combines unit test generation with feedback-directed refinement guided by a teacher LLM (GPT-4o-mini). Given a natural language specification and an initial RTL implementation, the teacher model first generates a unit test. If the RTL code fails the simulation, the model iteratively revises the design based on the observed error messages. When needed, the unit test is also updated to better reflect the intended functionality described by the specification. This process continues until the design passes simulation or a retry limit is reached. The resulting fine-tuning dataset consists of 125,777 validated triples: a natural language specification, a correct RTL design, and a self-checking unit test." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.55, + 0.493, + 0.64 + ], + "angle": 0, + "content": "We fine-tune VeriCoder from Qwen2.5-14B-Instruct using our curated dataset and evaluate it on two established RTL code generation benchmarks: VerilogEval [9] and RTLLM [10]. VeriCoder achieves new state-of-the-art performance, achieving up to \\(71.7\\%\\) and \\(27.4\\%\\) relative gains in the pass@k metric over the previous best fine-tuned model OriGen [8]." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.64, + 0.495, + 0.715 + ], + "angle": 0, + "content": "We conduct an ablation study demonstrating that models trained on our functionally validated dataset outperform those trained on non-validated data, under the same base model and training setup. These results highlight the importance of high-quality, functionally validated datasets for RTL code generation." 
+ }, + { + "type": "text", + "bbox": [ + 0.091, + 0.716, + 0.317, + 0.73 + ], + "angle": 0, + "content": "Our contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.732, + 0.493, + 0.822 + ], + "angle": 0, + "content": "- We introduce VeriCoder, an RTL code generation model fine-tuned on a dataset validated for functional correctness. On the VerilogEval and RTLLM benchmarks, VeriCoder achieves state-of-the-art performance among open-source fine-tuned models, yielding relative pass@k gains of up to \\(71.7\\%\\) and \\(27.4\\%\\) over the prior best." + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.823, + 0.493, + 0.898 + ], + "angle": 0, + "content": "- We develop a dataset augmentation pipeline that combines unit test generation with feedback-directed refinement guided by a teacher LLM. This yields, to the best of our knowledge, the largest fine-tuning dataset to date with functional validation, consisting of 125,777 validated" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.732, + 0.493, + 0.898 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.908, + 0.493, + 0.946 + ], + "angle": 0, + "content": "1While functional correctness is not fully guaranteed, we manually reviewed 100 randomly sampled examples and found that \\(92\\%\\) of the generated RTL code correctly matches the corresponding natural language descriptions." + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.354, + 0.922, + 0.384 + ], + "angle": 0, + "content": "triples of natural language specifications, RTL designs, and passing tests." + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.384, + 0.922, + 0.459 + ], + "angle": 0, + "content": "- We conduct an ablation study showing that functional validation during dataset construction improves model performance, underscoring the importance of using high-quality functionally validated datasets for RTL code generation." 
+ }, + { + "type": "title", + "bbox": [ + 0.57, + 0.48, + 0.855, + 0.494 + ], + "angle": 0, + "content": "II. BACKGROUND AND RELATED WORK" + }, + { + "type": "title", + "bbox": [ + 0.504, + 0.5, + 0.783, + 0.516 + ], + "angle": 0, + "content": "A. Language Modeling and Fine-Tuning" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.52, + 0.922, + 0.595 + ], + "angle": 0, + "content": "Large Language Models (LLMs) are deep neural networks trained to perform language modeling, a task where the model learns to predict the next token in a sequence. Formally, given a sequence of tokens \\( x = (x_{1}, x_{2}, \\ldots, x_{T}) \\), the training objective is to maximize the log-likelihood:" + }, + { + "type": "equation", + "bbox": [ + 0.61, + 0.606, + 0.922, + 0.647 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {L M}} = \\sum_ {t = 1} ^ {T} \\log P \\left(x _ {t} \\mid x _ {< t}; \\theta\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.653, + 0.922, + 0.713 + ], + "angle": 0, + "content": "where \\(\\theta\\) denotes the model parameters and \\(x_{< t} = (x_1, \\ldots, x_{t-1})\\) represents the context tokens. This autoregressive objective enables the model to generate coherent text and capture long-range dependencies across various domains." + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.713, + 0.922, + 0.729 + ], + "angle": 0, + "content": "The training of LLMs is typically organized into two stages:" + }, + { + "type": "text", + "bbox": [ + 0.522, + 0.731, + 0.921, + 0.791 + ], + "angle": 0, + "content": "- Pre-training: The model is trained on massive, diverse corpora (e.g., web data, books, source code) to acquire broad knowledge and language understanding. This stage is expensive and performed once per model." + }, + { + "type": "text", + "bbox": [ + 0.522, + 0.792, + 0.92, + 0.852 + ], + "angle": 0, + "content": "- Post-training: The pre-trained model is adapted to specific tasks using smaller, curated datasets. 
This stage includes supervised fine-tuning (SFT), where the model is trained on task-specific input-output pairs." + }, + { + "type": "list", + "bbox": [ + 0.522, + 0.731, + 0.921, + 0.852 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.855, + 0.922, + 0.946 + ], + "angle": 0, + "content": "Since post-training large models from scratch is resource-intensive, researchers have developed parameter-efficient fin-tuning methods. One widely used approach is Low-Rank Adaptation (LoRA) [28]. Instead of updating the full weight matrices \\( W \\in \\mathbb{R}^{d \\times k} \\) in each linear layer, LoRA freezes the original weights and introduces a trainable low-rank update:" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "3" + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.068, + 0.924, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.32, + 0.268, + 0.674, + 0.284 + ], + "angle": 0, + "content": "Fig. 1: LLM-guided dataset augmentation overview." + }, + { + "type": "image", + "bbox": [ + 0.078, + 0.301, + 0.375, + 0.468 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.075, + 0.481, + 0.376, + 0.51 + ], + "angle": 0, + "content": "(a) Natural language specification taken from the Origen [8] dataset." + }, + { + "type": "image", + "bbox": [ + 0.406, + 0.3, + 0.642, + 0.471 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.402, + 0.482, + 0.638, + 0.522 + ], + "angle": 0, + "content": "(b) Buggy design taken from the Origen [8] dataset. It times out on the generated test shown in Figure 3." 
+ }, + { + "type": "image", + "bbox": [ + 0.678, + 0.3, + 0.916, + 0.471 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.674, + 0.482, + 0.913, + 0.522 + ], + "angle": 0, + "content": "(c) Correct design fixed by the teacher model that passes the generated test in Figure 3." + }, + { + "type": "image_caption", + "bbox": [ + 0.073, + 0.528, + 0.924, + 0.575 + ], + "angle": 0, + "content": "Fig. 2: Natural language specification (left) and the corresponding buggy and corrected Verilog designs (middle and right). The specification and buggy design are from the original dataset [8], which lacks tests, while the test (Figure 3) and corrected design are generated by a teacher model (GPT-4o-mini) and included in our validated dataset." + }, + { + "type": "equation", + "bbox": [ + 0.18, + 0.618, + 0.492, + 0.634 + ], + "angle": 0, + "content": "\\[\nW ^ {\\prime} = W + \\Delta W = W + A B, \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.643, + 0.493, + 0.734 + ], + "angle": 0, + "content": "where \\( A \\in \\mathbb{R}^{d \\times r} \\) and \\( B \\in \\mathbb{R}^{r \\times k} \\), and \\( r \\ll \\min(d, k) \\). Only \\( A \\) and \\( B \\) are updated during training, while \\( W \\) remains unchanged. This technique reduces both memory and compute overhead during adaptation, making it feasible to specialize large LLMs to domain-specific applications, such as RTL generation, with limited computational resources." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.758, + 0.368, + 0.772 + ], + "angle": 0, + "content": "B. Related Work on RTL Code Generation" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.778, + 0.491, + 0.869 + ], + "angle": 0, + "content": "Progress on open-source RTL code generation is limited by the absence of large-scale, high-quality datasets. To mitigate this, recent efforts have focused on automated data mining and augmentation techniques to enrich existing corpora of RTL examples. 
Table I presents the comparison of different strategies for constructing fine-tuning datasets." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.87, + 0.492, + 0.946 + ], + "angle": 0, + "content": "Mining open-source RTL designs is a common strategy for dataset construction. VeriGen [26] compiles Verilog modules from GitHub and textbooks into a structured corpus using automated syntax checks. BetterV [24] collects Verilog modules from the internet and then filters designs based on coding style" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.601, + 0.923, + 0.678 + ], + "angle": 0, + "content": "and syntactic validity. CraftRTL [29] augments fine-tuning data with non-textual code representations, injecting synthetic errors derived from intermediate model checkpoints into open-source Verilog code. Other works [8], [30], [31] adopt similar methodologies for sourcing and preprocessing RTL code." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.681, + 0.922, + 0.832 + ], + "angle": 0, + "content": "Another line of work leverages a commercial LLM for synthetic data generation. RTLCoder [6] prompts GPT-3.5 with domain keywords to generate both task descriptions and corresponding RTL, discarding any outputs that fail to compile. OriGen [8] further employs Claude 3.5 in a two-stage code-to-code pipeline: first turning mined RTL code into natural language specifications, then regenerating code from these specifications under compiler guidance, combining the strengths of real-world examples and synthetic generation. ChipGPT [27] transforms Verilog ASTs into natural language specifications." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.835, + 0.922, + 0.927 + ], + "angle": 0, + "content": "While most of the existing work listed in Table I ensures syntax validity, none of them has any evidence of functional correctness. 
Without comprehensive unit tests or simulation-based feedback during dataset construction, models fine-tuned on these corpora may produce code that compiles but still fails to meet the intended natural language specification." + }, + { + "type": "text", + "bbox": [ + 0.52, + 0.93, + 0.922, + 0.946 + ], + "angle": 0, + "content": "A recent work, OpenLLM-RTL [32], explores the idea of" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.032, + 0.921, + 0.04 + ], + "angle": 0, + "content": "4" + }, + { + "type": "code", + "bbox": [ + 0.091, + 0.07, + 0.482, + 0.352 + ], + "angle": 0, + "content": "```bash\n`timescale 1ns/1ps\nmodule tb_and3;\nreg a = 0, b = 0, c = 0;\nwire y;\n// Instantiate the DUT (Design Under Test)\nand3 uut (.a(a), .b(b), .c(c), .y(y));\ninitial begin\n// Wait for signals to settle\n#1;\n// Set all inputs to 1; expected y = 1\n{a, b, c} = 3'b111;\n#1;\n// Check output, report error if incorrect\nif (y != 1'B1)\n$fatal(1, \"FAIL: y=%b (expected 1)\", y);\n$display(\"PASS\");\n$finish;\nend\nendmodule" + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.365, + 0.493, + 0.456 + ], + "angle": 0, + "content": "Fig. 3: Unit test for the and3 module. The buggy design (Figure 2b) times out on this test, while the corrected design (Figure 2c) passes successfully. The test is generated by the teacher model GPT-4o-mini using the prompt in Figure 4a, and is used to validate and augment the original dataset, which contains no tests." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.483, + 0.492, + 0.588 + ], + "angle": 0, + "content": "using LLMs to generate assertions, producing a functionally verified dataset of 7k examples. While sharing the same goal of improving functional correctness in fine-tuning datasets, our work takes a different approach by generating unit tests for validation. Our final dataset contains over 125,777 examples—the largest functionally validated RTL dataset to date." 
+ }, + { + "type": "text", + "bbox": [ + 0.074, + 0.589, + 0.493, + 0.831 + ], + "angle": 0, + "content": "Beyond data collection and synthesis techniques, several works explore other methods to enhance RTL code generation quality. ScaleRTL [33] emphasizes reasoning by generating intermediate traces and leveraging test-time compute through iterative self-reflection. DeepRTL [34] adopts curriculum learning guided by multi-level natural language summaries. VeriSeek [35] applies reinforcement learning with feedback derived from AST-level similarity between LLM outputs and reference designs. AutoVCoder [36] incorporates retrieval-augmented generation (RAG), dynamically supplying relevant Verilog snippets to the model. CodeV [37] extends generation capabilities to tasks such as fill-in-the-middle (FIM). Our work adopts standard supervised fine-tuning while focusing on constructing a large-scale, functionally validated dataset. Our approach is complementary and orthogonal to existing techniques." + }, + { + "type": "title", + "bbox": [ + 0.209, + 0.847, + 0.358, + 0.86 + ], + "angle": 0, + "content": "III. METHODOLOGY" + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.866, + 0.166, + 0.879 + ], + "angle": 0, + "content": "A. Overview" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.885, + 0.493, + 0.947 + ], + "angle": 0, + "content": "We aim to improve the quality of fine-tuning datasets consisting of natural language specifications paired with syntactically correct Verilog designs, as seen in prior work [6]–[8], [24], [26]. These datasets, including Origen [8], contain" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.069, + 0.923, + 0.281 + ], + "angle": 0, + "content": "Verilog designs that pass syntax checks but are not validated against unit tests to ensure functional correctness. 
To address this limitation, we introduce an automated dataset augmentation pipeline that leverages a teacher language model, e.g., GPT-4o-mini, to validate each example through iterative refinement. As illustrated in Figure 1, given a natural language specification and an initial RTL design, the teacher model first generates a unit test. If the RTL design fails the simulation, the model iteratively revises the design based on the error message. When needed, it also updates the unit test to better align with the natural language specification. Although our experiments focus on augmenting the Origen dataset due to its size and quality, the proposed methodology is broadly applicable to any dataset lacking test validation." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.281, + 0.923, + 0.416 + ], + "angle": 0, + "content": "The pipeline begins with the original dataset \\( D = \\{(\\text{specification}, \\text{design})\\} \\), where each RTL design is intended to implement a corresponding natural language specification. However, because no tests are provided, there is no evidence that the designs exhibit the intended functional behavior. For each pair, we prompt the teacher model, GPT-4o-mini, to generate a unit test for the design. The test is compiled and simulated with the design to check for correctness, where correctness means the design passes the simulation test." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.416, + 0.923, + 0.55 + ], + "angle": 0, + "content": "If the simulation fails, we extract the resulting error message and re-invoke the teacher model using a refinement prompt. This prompt includes the specification, the current design and test, and the error message. The model attempts to resolve the failure by making minimal modifications to the design, the test, or both. 
This refinement process repeats iteratively: each candidate is re-simulated, and the cycle continues until the design passes the test or a maximum number of attempts is reached." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.551, + 0.924, + 0.642 + ], + "angle": 0, + "content": "The final output is a validated dataset \\( D' = \\{(\\text{specification}, \\text{design}, \\text{test})\\} \\), where each triplet contains a natural language specification, a Verilog design, and unit tests. A concrete motivating example is shown in Section III-B, and the details of the algorithm and prompts are provided in Section III-C." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.656, + 0.668, + 0.671 + ], + "angle": 0, + "content": "B. Motivating Example" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.674, + 0.922, + 0.824 + ], + "angle": 0, + "content": "Figure 2 presents a motivating example taken directly from the Origen dataset [8], highlighting a key limitation of datasets that rely only on syntax checks for validation. Prior work in RTL generation typically assumes that syntactic correctness is sufficient for fine-tuning, without verifying functionality through unit tests. This example demonstrates that a design can compile without errors yet fail to implement the intended behavior. It also illustrates how our method can automatically detect and correct such issues through test generation and iterative refinement." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.825, + 0.922, + 0.914 + ], + "angle": 0, + "content": "This example includes a natural language specification (Figure 2a), a buggy RTL design from the original dataset (Figure 2b), and a corrected design produced by our pipeline (Figure 2c). The specification describes a simple combinational module, and3, which computes the bitwise AND of three one-bit inputs: a, b, and c." 
+ }, + { + "type": "text", + "bbox": [ + 0.504, + 0.915, + 0.923, + 0.945 + ], + "angle": 0, + "content": "The original design, though syntactically valid, is functionally incorrect due to several semantic issues. First, it misuses non-" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.041 + ], + "angle": 0, + "content": "5" + }, + { + "type": "title", + "bbox": [ + 0.093, + 0.072, + 0.22, + 0.088 + ], + "angle": 0, + "content": "Prompt Template" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.094, + 0.471, + 0.142 + ], + "angle": 0, + "content": "System Prompt You are a Verilog design and testing expert. Given a hardware specification described in natural language, your job is to generate both a correct Verilog module and a corresponding unit test that checks its functionality through simulation." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.148, + 0.17, + 0.159 + ], + "angle": 0, + "content": "User Prompt" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.16, + 0.346, + 0.171 + ], + "angle": 0, + "content": "- Natural Language Specification: {NL Spec}" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.172, + 0.283, + 0.183 + ], + "angle": 0, + "content": "- Initial Implementation: {design}" + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.184, + 0.163, + 0.193 + ], + "angle": 0, + "content": "- Your task:" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.16, + 0.346, + 0.193 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.194, + 0.354, + 0.205 + ], + "angle": 0, + "content": "1) Provide the unit tests for the given design." + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.206, + 0.469, + 0.227 + ], + "angle": 0, + "content": "2) Revise the Verilog implementation if the original design fails to pass your test cases." 
+ }, + { + "type": "text", + "bbox": [ + 0.104, + 0.228, + 0.469, + 0.25 + ], + "angle": 0, + "content": "3) Follow good coding practices, such as using meaningful comments to document key logic and decision points." + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.251, + 0.397, + 0.261 + ], + "angle": 0, + "content": "4) Use $fatal(1, \"msg\") to flag incorrect behavior." + }, + { + "type": "text", + "bbox": [ + 0.104, + 0.262, + 0.418, + 0.274 + ], + "angle": 0, + "content": "5) Output format: {\"design\": \"...\", \"test\": \"...\"}" + }, + { + "type": "list", + "bbox": [ + 0.104, + 0.194, + 0.469, + 0.274 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.294, + 0.476, + 0.308 + ], + "angle": 0, + "content": "(a) Prompt for generating a Verilog module's corresponding test" + }, + { + "type": "title", + "bbox": [ + 0.529, + 0.072, + 0.657, + 0.087 + ], + "angle": 0, + "content": "Prompt Template" + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.094, + 0.907, + 0.142 + ], + "angle": 0, + "content": "System Prompt You are a Verilog design and testing expert. Analyze a failing design and its test, and make minimal yet sufficient edits to correct the issue while preserving the intended behavior specified in natural language." 
+ }, + { + "type": "title", + "bbox": [ + 0.529, + 0.148, + 0.606, + 0.159 + ], + "angle": 0, + "content": "User Prompt" + }, + { + "type": "text", + "bbox": [ + 0.53, + 0.16, + 0.783, + 0.171 + ], + "angle": 0, + "content": "- Natural Language Specification: {NL Spec}" + }, + { + "type": "text", + "bbox": [ + 0.53, + 0.172, + 0.776, + 0.183 + ], + "angle": 0, + "content": "- Previous Design and Test: {design}, {test}" + }, + { + "type": "text", + "bbox": [ + 0.53, + 0.184, + 0.737, + 0.194 + ], + "angle": 0, + "content": "- Simulation Output: {error message}" + }, + { + "type": "text", + "bbox": [ + 0.53, + 0.195, + 0.6, + 0.204 + ], + "angle": 0, + "content": "- Your task:" + }, + { + "type": "list", + "bbox": [ + 0.53, + 0.16, + 0.783, + 0.204 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.205, + 0.906, + 0.227 + ], + "angle": 0, + "content": "1) Carefully identify the root cause of the failure by analyzing the code and the error message." + }, + { + "type": "text", + "bbox": [ + 0.54, + 0.228, + 0.906, + 0.25 + ], + "angle": 0, + "content": "2) Make changes to either the design or the test (or both) to resolve the issue while maintaining correctness." + }, + { + "type": "text", + "bbox": [ + 0.54, + 0.25, + 0.907, + 0.273 + ], + "angle": 0, + "content": "3) Output format: {\"explanation\": \"...\", \"design\": \"...\", \"test\": \"...\"}" + }, + { + "type": "list", + "bbox": [ + 0.54, + 0.205, + 0.907, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.546, + 0.294, + 0.888, + 0.308 + ], + "angle": 0, + "content": "(b) Prompt for refining a failing Verilog design and test" + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.315, + 0.921, + 0.346 + ], + "angle": 0, + "content": "Fig. 
4: Prompt templates provided to the teacher model for automated Verilog test generation and refinement, ensuring that the final design passes the generated test and matches the original natural language specification." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.373, + 0.492, + 0.553 + ], + "angle": 0, + "content": "blocking assignments \\((<=)\\) inside a combinational always @\\* block, which can lead to counterintuitive synthesis results. Second, if instead used inside a sequential block, the sequence of non-blocking assignments in the design-y <= a, then y <= y & c, and finally y <= y & b—does not correctly compute and store in y the bitwise AND of a, b, and c. In particular, non-blocking assignments defer updates until the end of the current timestep, meaning that all assignments operate on the same initial value of y, and only the final assignment takes effect. Finally, if the non-blocking assignments were replaced with blocking ones, the code would introduce a combinational feedback loop, which cannot stabilize." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.554, + 0.493, + 0.643 + ], + "angle": 0, + "content": "These types of errors occur because the RTL code in prior datasets, including Origen [8], is synthetically generated by teacher LLMs such as Claude 3.5 and filtered only through syntax checks. Without simulation or test-based validation, semantic bugs that affect functional correctness remain undetected." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.644, + 0.493, + 0.915 + ], + "angle": 0, + "content": "We provide the natural language specification and the buggy RTL design to the teacher model GPT-4o-mini, prompting it to generate a unit test using the template shown in Figure 4a (further detailed in Section III-C). The resulting test is shown in Figure 3, which sets all three inputs to 1 and checks whether the output y evaluates to 1 as expected. 
When the buggy design (Figure 2b) is simulated with this test, it hangs and ultimately times out. The bug exemplifies a combinational loop. The always @* block is meant for combinational logic and its evaluation is triggered upon changes to any of the variables read inside the block. In this case, an evaluation of the block is triggered when either y, a, b, or c changes. However, y is both read (on the RHS) and written (on the LHS) in the same block. Upon evaluating the block, it schedules an update to y, which causes a change to y. This change retriggers the block, leading to another scheduled update to y, and so on. This loop continues indefinitely, preventing the simulation from converging." + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.915, + 0.492, + 0.946 + ], + "angle": 0, + "content": "The corrected version replaces the non-blocking assignments with a single blocking assignment \\((=)\\), ensuring that \\(y\\) is updated" + }, + { + "type": "code_caption", + "bbox": [ + 0.506, + 0.369, + 0.895, + 0.385 + ], + "angle": 0, + "content": "Algorithm 1 Dataset Augmentation with a Teacher LLM" + }, + { + "type": "algorithm", + "bbox": [ + 0.507, + 0.389, + 0.922, + 0.702 + ], + "angle": 0, + "content": "Input: Original dataset \\(D = \\{(s_i,d_i)\\}_{i = 1}^N\\) \n\\(\\triangleright s_i\\) : NL specification; \\(d_{i}\\) : RTL design Maximum attempts \\(T\\) \nDefine: GenTestTpl \\(\\leftarrow\\) prompt template for test generation RefineTpl \\(\\leftarrow\\) prompt template for iterative refinement \nOutput: Augmented dataset \\(D^{\\prime} = \\{(s_{i},d_{i},t_{i})\\}_{i = 1}^{M}\\) \n\\(\\triangleright t_i\\) : Generated unit test \n1: \\(D^{\\prime}\\gets \\emptyset\\) \n2: for each \\((s,d)\\in D\\) do \n3: attempt \\(\\leftarrow 0\\) success \\(\\leftarrow\\) false \n4: while attempt \\(< T\\) ∧ ¬success do \n5: attempt \\(\\leftarrow\\) attempt + 1 \n6: if attempt \\(= = 1\\) then \n7: d,t \\(\\leftarrow\\) LLMInvoke(GenTestTpl,s,d) \n8: else \n9: d,t 
\\(\\leftarrow\\) LLMInvoke(RefineTpl,s,d,t,err) \n10: success, err \\(\\leftarrow\\) RunVerilogTest(d,t) \n11: if success then \n12: \\(D^{\\prime}\\gets D^{\\prime}\\cup \\{(s,d,t)\\}\\) \n13: return \\(D^{\\prime}\\)" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.727, + 0.921, + 0.772 + ], + "angle": 0, + "content": "immediately with the result of a & b & c, as required by the specification. This version passes the test generated by the teacher model and behaves correctly under simulation." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.773, + 0.922, + 0.863 + ], + "angle": 0, + "content": "This example underscores the importance of functional validation in RTL datasets. Syntax checks alone cannot catch subtle but critical semantic errors. Our methodology, through teacher-driven test generation and iterative refinement, ensures that each design in the augmented dataset is not only syntactically valid but also functionally validated with unit tests." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.881, + 0.691, + 0.896 + ], + "angle": 0, + "content": "C. Algorithm and Prompts" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.899, + 0.922, + 0.946 + ], + "angle": 0, + "content": "Algorithm 1 presents our automated pipeline for transforming an unvalidated RTL dataset into a functionally validated one. Starting from a dataset \\( D = \\{(s_i, d_i)\\}_{i=1}^N \\), where each" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.032, + 0.921, + 0.04 + ], + "angle": 0, + "content": "6" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.07, + 0.492, + 0.175 + ], + "angle": 0, + "content": "example consists of a natural language specification \\( s_i \\) and a corresponding RTL design \\( d_i \\) (e.g., from Origen [8]), the goal is to generate a unit test \\( t_i \\) that validates the functional correctness of the design. 
If the design fails to pass the test, we invoke an iterative refinement loop that updates the design and test until it passes or a maximum number of attempts \\( T \\) is reached. We set \\( T = 5 \\) in our experiments." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.175, + 0.493, + 0.281 + ], + "angle": 0, + "content": "The procedure is powered by a teacher model, GPT-4o-mini, which corresponds to the LLMInvoke calls in Algorithm 1. While stronger models such as GPT-4o or o3-mini may yield better performance, we use GPT-4o-mini in practice because of the large size of the dataset (217,462 examples in Origen) and the high cost associated with repeated API queries to OpenAI models." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.281, + 0.492, + 0.372 + ], + "angle": 0, + "content": "The process begins by prompting the teacher model with the test generation template (Figure 4a), together with a natural language specification and its initial RTL design (e.g., Figure 2a and Figure 2b). The model then produces a candidate unit test (e.g., Figure 3) designed to check whether the design satisfies the intended functionality under simulation." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.372, + 0.492, + 0.493 + ], + "angle": 0, + "content": "The design and test are compiled and simulated using standard Verilog tooling. If the test fails, for example due to a timeout, incorrect output, or another runtime error, we construct a refinement prompt (Figure 4b) that includes the specification, the failing design and test, and the simulation error message (corresponding to the err variable in Algorithm 1). This prompt is then passed to the teacher model, which attempts to fix the issue by making edits to the design, the test, or both." 
+ }, + { + "type": "text", + "bbox": [ + 0.074, + 0.493, + 0.492, + 0.554 + ], + "angle": 0, + "content": "The refinement process repeats until the updated design passes simulation or the maximum number of attempts \\( T \\) is reached. Once a design successfully passes its test, the validated triple \\( (s_i, d_i, t_i) \\) is added to the output dataset \\( D' \\)." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.554, + 0.492, + 0.645 + ], + "angle": 0, + "content": "This strategy enables systematic detection and correction of subtle RTL bugs that cannot be identified through syntax checks alone. By integrating LLM-based test generation and iterative refinement into the dataset construction pipeline, we produce a dataset that is not only syntactically valid but also functionally validated through simulation." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.645, + 0.493, + 0.782 + ], + "angle": 0, + "content": "While functional correctness under all possible inputs cannot be guaranteed, the inclusion of unit tests makes our augmented dataset substantially more robust than prior approaches that rely solely on syntactic checking. We view this as a practical and scalable step toward building higher-quality fine-tuning datasets for RTL generation. To assess quality, we manually reviewed 100 randomly sampled examples and found that \\(92\\%\\) of the generated RTL code correctly matched the corresponding natural language descriptions." + }, + { + "type": "title", + "bbox": [ + 0.186, + 0.8, + 0.382, + 0.813 + ], + "angle": 0, + "content": "IV. EXPERIMENTAL SETUP" + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.82, + 0.155, + 0.833 + ], + "angle": 0, + "content": "A. Dataset" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.839, + 0.493, + 0.947 + ], + "angle": 0, + "content": "Following the methodology described in Section III, we construct a fine-tuning dataset comprising 125,777 examples. 
Each example includes a natural language specification, a corresponding RTL design, and associated unit tests. Table II summarizes key statistics: the specifications contain an average of 247 words (ranging from 116 to 549), RTL implementations average 35 lines of code (ranging from 5 to 225), and unit" + }, + { + "type": "table", + "bbox": [ + 0.527, + 0.064, + 0.903, + 0.151 + ], + "angle": 0, + "content": "
CategoryCountLength
MinMaxAvg
NL specification (words)116549247
Design (lines of RTL)125,777522535
Unit tests (lines of RTL)619755
" + }, + { + "type": "table_caption", + "bbox": [ + 0.504, + 0.156, + 0.924, + 0.202 + ], + "angle": 0, + "content": "TABLE II: Dataset statistics: total number of examples and length distributions for natural language specifications, RTL implementations, and unit tests in the VeriCoder dataset." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.229, + 0.924, + 0.275 + ], + "angle": 0, + "content": "tests average 55 lines (ranging from 6 to 197). We use the specification-solution pairs from this dataset to train our model, VeriCoder." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.296, + 0.698, + 0.311 + ], + "angle": 0, + "content": "B. LoRA Fine-Tuning Setup" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.314, + 0.924, + 0.452 + ], + "angle": 0, + "content": "Following standard practices for LLM fine-tuning, we fine-tune the base model of Qwen2.5-14B-Instruct using Low-Rank Adaptation (LoRA, described in Section II-A), with a rank of 16 and a scaling factor of 32 to all linear projection layers in the transformer. Training is conducted over 3 epochs with a batch size of 40. We adopt a constant learning rate of \\(1 \\times 10^{-5}\\), paired with a linear decay scheduler and a warm-up ratio of 0.05. The optimizer is used with a weight decay of \\(1 \\times 10^{-4}\\) and gradient clipping is applied with a maximum norm of 1." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.472, + 0.702, + 0.485 + ], + "angle": 0, + "content": "C. Benchmarks and Metrics" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.491, + 0.922, + 0.581 + ], + "angle": 0, + "content": "Following the evaluation protocol established in prior work [7], [8], we benchmark against VerilogEval [9] and RTLLM [10]. For VerilogEval, we report the standard Pass@k metric with \\( k \\in \\{1,5,10\\} \\), which estimates the expected probability that at least one of the top- \\( k \\) generated programs passes all test cases. 
The metric is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.62, + 0.587, + 0.803, + 0.63 + ], + "angle": 0, + "content": "\\[\n\\operatorname {P a s s} @ k = \\mathbb {E} \\left[ 1 - \\frac {\\binom {n - c} {k}}{\\binom {n} {k}} \\right]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.633, + 0.922, + 0.725 + ], + "angle": 0, + "content": "where \\( n \\) is the total number of generated programs and \\( c \\) is the number of correct ones. All test cases are manually created by experts who design the benchmarks. In all evaluations, we set \\( n = 10 \\). For RTLLM, we report both syntax correctness and functional correctness using Pass@5. This evaluation setup aligns with that used in prior work [8]." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.745, + 0.686, + 0.759 + ], + "angle": 0, + "content": "D. Models for Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.763, + 0.922, + 0.9 + ], + "angle": 0, + "content": "We evaluate two groups of models. The first group consists of pretrained-only base models, including OpenAI's latest releases (o4-mini, o3-mini, GPT-4o, GPT-4o-mini), Google's Gemini 2.0 Flash, DeepSeek's R1 and DeepSeek-Coder-7B-v1.5 (the base model used in prior work [8]), Meta's LLaMA2-7B model, and Alibaba's Qwen2.5-14B-Instruct (our base model for fine-tuning). The second group includes fine-tuned models with released weights from prior work: Origen [8], RTLCoder [6], and ChipGPT [27]." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.9, + 0.922, + 0.947 + ], + "angle": 0, + "content": "To ensure a fair comparison, we use identical input prompts and post-processing scripts across all models. For models released by prior work, we do not adopt their model-specific" + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.04 + ], + "angle": 0, + "content": "7" + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.064, + 0.916, + 0.352 + ], + "angle": 0, + "content": "
Model TypeEvaluated ModelVerilogEval V1.0 [9] \n(using pass@k metric)RTLLM V1.1 [10] \n(using pass@5 metric)
Eval-Machine (%)Eval-Human (%)Syntax-VCS (%)Functional (%)
k=1k=5k=10k=1k=5k=10
Base Modelso4-mini-2025-04-1661.967.868.664.366.467.186.272.4
GPT-4o-2024-11-2063.766.567.154.360.462.2100.069.0
GPT-4o-mini-2024-07-1855.762.464.344.751.655.189.765.5
DeepSeek-R165.770.972.062.869.169.979.358.6
o3-mini-2025-01-3166.471.672.062.068.969.969.055.2
Qwen2.5-14B-Instruct47.854.255.235.340.042.369.041.4
Gemini-2.0-flash-00160.362.663.652.157.659.065.534.5
DeepSeek-R1-Distill-Qwen-14B46.264.168.536.751.755.162.134.5
DeepSeek-Coder-7B-v1.544.458.962.925.840.244.948.324.1
LLaMA-2-7B7.015.618.90.42.13.83.40.0
Fine-Tuned Models \n(Prior Work)OriGen [8]35.965.168.522.347.551.951.737.9
RTLCoder-DeepSeek [6]22.051.457.314.735.242.317.210.3
RTLCoder-Mistral [6]17.646.456.612.431.536.53.40.0
ChipGPT-LLaMA3.1-8B-SFT [27]17.646.456.612.431.536.513.80.0
ChipGPT-LLaMA2-SFT-7B [27]0.94.27.70.62.23.86.90.0
Our WorkVeriCoder55.762.964.338.349.251.979.348.3
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.074, + 0.358, + 0.923, + 0.391 + ], + "angle": 0, + "content": "TABLE III: RTL code generation performance across models. To ensure a fair comparison, we use the same input prompts and apply identical post-processing scripts, running inference with model weights released by prior work." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.416, + 0.492, + 0.507 + ], + "angle": 0, + "content": "prompts [8] or inference pipelines [6], [27]. Instead, we apply a uniform evaluation script, with the only variable being the model under test. This standardization is critical, as both input formatting and post-processing can significantly affect performance. By controlling these factors, we isolate model capability and enable a fair comparison." + }, + { + "type": "title", + "bbox": [ + 0.238, + 0.526, + 0.33, + 0.54 + ], + "angle": 0, + "content": "V. RESULTS" + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.547, + 0.268, + 0.56 + ], + "angle": 0, + "content": "A. Main Evaluation Results" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.567, + 0.493, + 0.582 + ], + "angle": 0, + "content": "Table III shows the results. Our major findings are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.583, + 0.493, + 0.822 + ], + "angle": 0, + "content": "a) Comparison with prior work: VeriCoder achieves state-of-the-art results across two RTL code generation benchmarks, outperforming all previously released open-source finetuned models. On VerilogEval-Machine, VeriCoder attains a pass@1 accuracy of \\(55.7\\%\\), representing a 19.8 percentage point improvement over the best prior model, OriGen. On VerilogEval-Human, it reaches \\(38.3\\%\\), exceeding OriGen by 16.0 percentage points. Across all evaluated \\(k\\)-shot settings \\((k = 1, 5, 10)\\), VeriCoder consistently maintains its lead on the Human split. 
On the RTLLM benchmark, VeriCoder achieves \\(79.3\\%\\) syntax correctness and \\(48.3\\%\\) functional correctness, surpassing OriGen's \\(51.7\\%\\) and \\(37.9\\%\\), respectively. In conclusion, VeriCoder delivers relative improvements of up to \\(71.7\\%\\) on VerilogEval and \\(27.4\\%\\) on RTLLM in pass@k accuracy, surpassing the previous state-of-the-art model on both benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.824, + 0.493, + 0.947 + ], + "angle": 0, + "content": "To better understand the relatively low performance of ChipGPT [27], we examined its outputs in detail. We observed that its generated RTL designs often include module headers that deviate from the given specifications, revealing difficulty in precise instruction following. Moreover, its base model, LLaMA2-7B, performs even worse, suggesting that limitations in the instruction-following capabilities of the underlying pretrained model constrain the effectiveness of the fine-tuned" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.416, + 0.923, + 0.493 + ], + "angle": 0, + "content": "variant. For a fair comparison, we do not apply any of the model-specific customized post-processing scripts that attempt to fix syntax or header issues. Instead, we use a standardized evaluation script for all models, extracting Verilog code as-is to ensure consistency." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.514, + 0.923, + 0.697 + ], + "angle": 0, + "content": "b) Effectiveness of our fine-tuning: Starting from Qwen-2.5-14B-Instruct as our base model, VeriCoder delivers substantial gains across VerilogEval. On the VerilogEvalMachine split, pass@1 jumps up by \\(7.6\\%\\), pass@5 by \\(4.0\\%\\), and pass@10 by \\(2.1\\%\\), and VerilogEval-Human reflects the same trend. On RTLLM, functional pass@5 is \\(7\\%\\) higher than its base model. 
Specifically, VeriCoder even marginally outperforms one of the commercial models, Google's Gemini2.0-flash, on pass@5 and pass@10 metrics of Eval-Machine as well as on RTLLM. Together, these results demonstrate that our fine-tuning process and our validated dataset significantly boost pass@k metrics and semantic correctness in RTL generation." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.718, + 0.923, + 0.945 + ], + "angle": 0, + "content": "c) Model gap remains: Despite the observed improvements, a substantial performance gap persists between VeriCoder and the strongest large models. For instance, o3-mini attains \\(66.4\\%\\) on VerilogEval Pass@1 compared to VeriCoder's \\(55.7\\%\\). DeepSeek-R1 achieves \\(69.1\\%\\) on human-graded Pass@5, versus VeriCoder's \\(49.2\\%\\). Commercial LLMs such as GPT-4o reach a perfect \\(100.0\\%\\) Syntax-VCS validity and \\(69.0\\%\\) functional correctness, while VeriCoder records \\(79.3\\%\\) and \\(48.3\\%\\), respectively. Despite the performance gap, open-source lightweight models offer compelling advantages. They provide transparency, allow for local deployment, and ensure intellectual property protection, i.e., capabilities that are particularly important for RTL design workflows where security, customizability, and integration into existing toolchains are critical." + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.032, + 0.92, + 0.04 + ], + "angle": 0, + "content": "8" + }, + { + "type": "table", + "bbox": [ + 0.077, + 0.064, + 0.498, + 0.164 + ], + "angle": 0, + "content": "
Model VerilogEval [9] (Pass@5) RTLLM [10] (Pass@5)
Syntax Func
Qwen2.5-14B-Instruct (base) 46.8 69.0 41.4
Qwen w/ unvalidated data 53.5 75.9 44.8
Qwen w/ validated data 55.8 79.3 48.3
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.078, + 0.17, + 0.49, + 0.227 + ], + "angle": 0, + "content": "TABLE IV: We performed fine-tuning on the same base model using a functionally validated dataset and the functionally unvalidated dataset [8]. We report Pass@5 metrics for all models on two benchmarks." + }, + { + "type": "title", + "bbox": [ + 0.079, + 0.261, + 0.275, + 0.275 + ], + "angle": 0, + "content": "B. Ablation Study of Dataset" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.284, + 0.491, + 0.404 + ], + "angle": 0, + "content": "To assess the impact of dataset quality on RTL code generation, we conduct an ablation study using the same base model, Qwen2.5-14B-Instruct, fine-tuned on two datasets: (1) the unvalidated OriGen dataset from prior work [8], and (2) our newly curated, functionally validated dataset. All factors, including dataset size, fine-tuning hyperparameters, training procedures, and evaluation settings, are held constant to ensure a fair comparison." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.406, + 0.49, + 0.57 + ], + "angle": 0, + "content": "Across all metrics, we observe a consistent improvement as dataset quality increases. On the VerilogEval benchmark (covering both Machine and Human subsets), the base model achieves \\(46.8\\%\\) Pass@5. Fine-tuning on the unvalidated dataset raises performance to \\(53.5\\%\\), while our validated dataset further improves it to \\(55.8\\%\\). For RTLLM syntax correctness, the trend is similar: \\(69.0\\%\\) for the base model, \\(75.9\\%\\) for the unvalidated version, and \\(79.3\\%\\) when trained on validated data. Functional correctness sees even more significant improvement, rising from \\(41.4\\%\\) (base) to \\(44.8\\%\\) (unvalidated) and ultimately to \\(48.3\\%\\) (validated)." 
+ }, + { + "type": "text", + "bbox": [ + 0.078, + 0.573, + 0.49, + 0.633 + ], + "angle": 0, + "content": "These results demonstrate that functionally validated data provides more effective supervision than existing unvalidated data. This also underscores the importance of dataset quality in fine-tuning LLMs for RTL code generation." + }, + { + "type": "title", + "bbox": [ + 0.079, + 0.665, + 0.407, + 0.678 + ], + "angle": 0, + "content": "C. Test Passing Rates of Non-Validated Datasets" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.687, + 0.491, + 0.852 + ], + "angle": 0, + "content": "We examine the quality of fine-tuning datasets released by prior work by evaluating their passing rates against our synthetic unit tests generated by the teacher model GPT-4o-mini. For each corpus, we randomly sample 1,000 Verilog implementations and apply the test generation and refinement pipeline described in Section III. We then run corresponding unit tests against the original design and measure the proportion of the original designs that successfully pass the generated tests. As shown in Table V, only \\(24.4\\%\\) examples of the RTLCoder dataset [6] pass our functional tests, while OriGen [8] reaches \\(53.5\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.855, + 0.49, + 0.945 + ], + "angle": 0, + "content": "OriGen's higher pass rate aligns with its stronger code generation results in Table III, hinting at a positive link between dataset validity and downstream performance. These findings highlight the potential value of incorporating functional correctness validation into fine-tuning dataset curation for better RTL code generation." + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.064, + 0.911, + 0.119 + ], + "angle": 0, + "content": "
Prior Datasets # Sampled Examples Test Passing (%)
RTLCoder [6] 1000 24.4
OriGen [8] 1000 53.5
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.509, + 0.126, + 0.92, + 0.155 + ], + "angle": 0, + "content": "TABLE V: Test passing rates \\((\\%)\\) of datasets released by prior work on a randomly sampled set of 1000 examples." + }, + { + "type": "title", + "bbox": [ + 0.582, + 0.184, + 0.847, + 0.196 + ], + "angle": 0, + "content": "VI. DISCUSSION AND FUTURE WORK" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.201, + 0.921, + 0.366 + ], + "angle": 0, + "content": "While VeriCoder, combining unit test generation with feedback-driven refinement, improves the functional correctness of generated RTL code, it does not fully guarantee correctness. Synthetic test cases may fail to capture all possible edge cases. To address this challenge, future work should explore integrating formal verification techniques into the dataset construction pipeline to rigorously ensure the correctness of the generated code. Recent advancements have demonstrated promising results in translating natural language instructions into formal specifications [16], [38], as well as enforcing formal constraints during LLM-based code generation [39]." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.367, + 0.921, + 0.517 + ], + "angle": 0, + "content": "Moreover, most existing approaches, including VeriCoder, focus on small-scale RTL generation. However, practical hardware development often involves large, repository-level codebases with intricate cross-file dependencies and requirements for long-range context [40]–[42]. Recent work has begun to address these challenges through techniques such as combining fine-tuning with retrieval-augmented RTL code generation [43], [44]. Extending VeriCoder's unit test generation and feedback-directed refinement components to the repository scale will enable LLMs to handle more real-world RTL tasks." 
+ }, + { + "type": "text", + "bbox": [ + 0.509, + 0.518, + 0.92, + 0.683 + ], + "angle": 0, + "content": "Furthermore, reinforcement learning (RL) offers a powerful framework for further optimizing large language models' performance beyond what is achievable through supervised fine-tuning alone. Recent studies have demonstrated the effectiveness of RL in enhancing LLM-based code generation by incorporating diverse forms of feedback, such as test case outcomes, compiler diagnostics, and formal verification results [32], [45], [46]. Building on this progress, future work could investigate applying RL techniques to the VeriCoder dataset, using the accompanying test cases as a feedback signal to iteratively improve RTL code generation quality." + }, + { + "type": "title", + "bbox": [ + 0.649, + 0.701, + 0.78, + 0.713 + ], + "angle": 0, + "content": "VII. CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.719, + 0.921, + 0.944 + ], + "angle": 0, + "content": "Recent advances in Large Language Models (LLMs) have opened new possibilities for Electronic Design Automation (EDA), particularly in RTL code generation. However, most existing datasets emphasize syntactic validity while overlooking functional correctness, which limits the effectiveness of finetuned models. We introduce VERICODER, a model fine-tuned on a dataset with 125,000 examples that is validated for functional correctness. This dataset is constructed using a feedback-directed refinement pipeline guided by a teacher LLM, which generates and iteratively updates both RTL designs and unit tests until the design passes simulation. The resulting dataset consists of functionally validated triples comprising a natural language specification, an RTL implementation, and a passing test. 
Fine-tuned on this dataset, VERICODER achieves state-of-the-art results on two established RTL benchmarks," + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.912, + 0.031, + 0.921, + 0.04 + ], + "angle": 0, + "content": "9" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.07, + 0.493, + 0.162 + ], + "angle": 0, + "content": "yielding relative improvements of up to \\(71.7\\%\\) on VerilogEval and \\(27.4\\%\\) on RTLLM. An ablation study confirms the impact of functional validation on model performance, underscoring the importance of high-quality training data. Future work may explore formal verification and reinforcement learning to further advance AI-assisted hardware design." + }, + { + "type": "title", + "bbox": [ + 0.211, + 0.185, + 0.358, + 0.198 + ], + "angle": 0, + "content": "ACKNOWLEDGMENT" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.208, + 0.495, + 0.255 + ], + "angle": 0, + "content": "We thank Samantha Archer, Yao Hsiao, Mohammad Rahmani Fadiheh and Subhasish Mitra for their discussions. This work was partially supported by a Google Research Award." + }, + { + "type": "title", + "bbox": [ + 0.236, + 0.278, + 0.332, + 0.292 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.303, + 0.493, + 0.338 + ], + "angle": 0, + "content": "[1] M. Liu, T.-D. Ene, R. Kirby, C. Cheng, N. Pinckney, R. Liang, J. Alben, H. Anand, S. Banerjee, I. Bayraktaroglu et al., \"Chipnemo: Domain-adapted llms for chip design,\" arXiv preprint arXiv:2311.00176, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.338, + 0.493, + 0.383 + ], + "angle": 0, + "content": "[2] L. Chen, Y. Chen, Z. Chu, W. Fang, T.-Y. Ho, R. Huang, Y. Huang, S. Khan, M. Li, X. Li et al., \"The dawn of ai-native eda: Opportunities and challenges of large circuit models,\" arXiv preprint arXiv:2403.07257, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.383, + 0.493, + 0.429 + ], + "angle": 0, + "content": "[3] R. 
Zhong, X. Du, S. Kai, Z. Tang, S. Xu, H.-L. Zhen, J. Hao, Q. Xu, M. Yuan, and J. Yan, \"Llm4eda: Emerging progress in large language models for electronic design automation,\" arXiv preprint arXiv:2401.12224, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.429, + 0.493, + 0.464 + ], + "angle": 0, + "content": "[4] Z. He and B. Yu, “Large language models for eda: Future or mirage?” in Proceedings of the 2024 International Symposium on Physical Design, 2024, pp. 65–66." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.464, + 0.493, + 0.51 + ], + "angle": 0, + "content": "[5] X. Yao, Y. Wang, X. Li, Y. Lian, R. Chen, L. Chen, M. Yuan, H. Xu, and B. Yu, \"Rtlwriter: Methodologies for large models aided rtl code optimization,\" in Proceedings of the 43rd IEEE/ACM International Conference on Computer-Aided Design, 2024, pp. 1-7." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.51, + 0.493, + 0.555 + ], + "angle": 0, + "content": "[6] S. Liu, W. Fang, Y. Lu, J. Wang, Q. Zhang, H. Zhang, and Z. Xie, \"Rtlcoder: Fully open-source and efficient ltm-assisted rtl code generation technique,\" IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.555, + 0.493, + 0.602 + ], + "angle": 0, + "content": "[7] S. Liu, W. Fang, Y. Lu, Q. Zhang, H. Zhang, and Z. Xie, \"Rtlcoder: Outperforming gpt-3.5 in design rtl generation with our open-source dataset and lightweight solution,\" in 2024 IEEE LLM Aided Design Workshop (LAD). IEEE, 2024, pp. 1-5." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.602, + 0.493, + 0.646 + ], + "angle": 0, + "content": "[8] F. Cui, C. Yin, K. Zhou, Y. Xiao, G. Sun, Q. Xu, Q. Guo, D. Song, D. Lin, X. Zhang et al., \"Origen: Enhancing rtl code generation with code-to-code augmentation and self-reflection,\" arXiv preprint arXiv:2407.16237, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.646, + 0.493, + 0.693 + ], + "angle": 0, + "content": "[9] M. Liu, N. Pinckney, B. Khailany, and H. Ren, \"Veriloggeval: Evaluating large language models for verilog code generation,\" in 2023 IEEE/ACM International Conference on Computer Aided Design (ICCAD). IEEE, 2023, pp. 1-8." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.693, + 0.493, + 0.738 + ], + "angle": 0, + "content": "[10] Y. Lu, S. Liu, Q. Zhang, and Z. Xie, \"Rtllm: An open-source benchmark for design rtl generation with large language model,\" in 2024 29th Asia and South Pacific Design Automation Conference (ASP-DAC). IEEE, 2024, pp. 722-727." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.738, + 0.493, + 0.773 + ], + "angle": 0, + "content": "[11] Y. Tsai, M. Liu, and H. Ren, \"Rtlfixer: Automatically fixing rtI syntax errors with large language model,\" in Proceedings of the 61st ACM/IEEE Design Automation Conference, 2024, pp. 1-6." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.773, + 0.493, + 0.807 + ], + "angle": 0, + "content": "[12] Y. Liao, T. Adegbija, and R. Lysecky, \"Are llms any good for high-level synthesis?\" in Proceedings of the 43rd IEEE/ACM International Conference on Computer-Aided Design, 2024, pp. 1-8." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.808, + 0.493, + 0.853 + ], + "angle": 0, + "content": "[13] Y. Fu, Y. Zhang, Z. Yu, S. Li, Z. Ye, C. Li, C. Wan, and Y. C. Lin, \"Gpt4aigchip: Towards next-generation ai accelerator design automation via large language models,\" in 2023 IEEE/ACM International Conference on Computer Aided Design (ICCAD). IEEE, 2023, pp. 1-9." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.853, + 0.493, + 0.899 + ], + "angle": 0, + "content": "[14] Z. Yan, Y. Qin, X. S. Hu, and Y. Shi, \"On the viability of using llms for sw/hw co-design: An example in designing cim dnn accelerators,\" in 2023 IEEE 36th International System-on-Chip Conference (SOCC). 
IEEE, 2023, pp. 1-6." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.899, + 0.493, + 0.944 + ], + "angle": 0, + "content": "[15] Z. Liang, J. Cheng, R. Yang, H. Ren, Z. Song, D. Wu, X. Qian, T. Li, and Y. Shi, \"Unleashing the potential of llms for quantum computing: A study in quantum architecture design,\" arXiv preprint arXiv:2307.08191, 2023." + }, + { + "type": "list", + "bbox": [ + 0.078, + 0.303, + 0.493, + 0.944 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.071, + 0.922, + 0.118 + ], + "angle": 0, + "content": "[16] M. Cosler, C. Hahn, D. Mendoza, F. Schmitt, and C. Trippel, \"nl2spec: Interactively translating unstructured natural language to temporal logics with large language models,\" in International Conference on Computer Aided Verification. Springer, 2023, pp. 383-396." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.118, + 0.922, + 0.163 + ], + "angle": 0, + "content": "[17] C. Sun, C. Hahn, and C. Trippel, \"Towards improving verification productivity with circuit-aware translation of natural language to systemverilog assertions,\" in First International Workshop on Deep Learning-aided Verification, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.163, + 0.922, + 0.208 + ], + "angle": 0, + "content": "[18] H. Wu, Z. He, X. Zhang, X. Yao, S. Zheng, H. Zheng, and B. Yu, \"Chateda: A large language model powered autonomous agent for eda,\" IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.208, + 0.922, + 0.242 + ], + "angle": 0, + "content": "[19] Z. Xiao, X. He, H. Wu, B. Yu, and Y. Guo, \"Eda-copilot: A ragpowered intelligent assistant for eda tools,\" ACM Transactions on Design Automation of Electronic Systems, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.242, + 0.922, + 0.287 + ], + "angle": 0, + "content": "[20] K. Xu, J. Sun, Y. Hu, X. Fang, W. Shan, X. 
Wang, and Z. Jiang, \"Meic: Re-thinking rtl debug automation using llms,\" in Proceedings of the 43rd IEEE/ACM International Conference on Computer-Aided Design, 2024, pp. 1-9." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.287, + 0.922, + 0.322 + ], + "angle": 0, + "content": "[21] R. Li, L. B. Allal, Y. Zi, N. Muennighoff, D. Kocetkov, C. Mou, M. Marone, C. Akiki, J. Li, J. Chim et al., \"Starcoder: may the source be with you!\" arXiv preprint arXiv:2305.06161, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.322, + 0.922, + 0.356 + ], + "angle": 0, + "content": "[22] A. Lozhkov, R. Li, L. B. Allal, F. Cassano, J. Lamy-Poirier, N. Tazi, A. Tang, D. Pykhtar, J. Liu, Y. Wei et al., \"Starcoder 2 and the stack v2: The next generation,\" arXiv preprint arXiv:2402.19173, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.356, + 0.922, + 0.389 + ], + "angle": 0, + "content": "[23] E. Dehaerne, B. Dey, S. Halder, and S. De Gendt, “A deep learning framework for verilog autocompletion towards design and verification automation,” arXiv preprint arXiv:2304.13840, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.389, + 0.922, + 0.423 + ], + "angle": 0, + "content": "[24] Z. Pei, H.-L. Zhen, M. Yuan, Y. Huang, and B. Yu, \"Betterv: Controlled verilog generation with discriminative guidance,\" arXiv preprint arXiv:2402.03375, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.423, + 0.922, + 0.469 + ], + "angle": 0, + "content": "[25] S. Thakur, B. Ahmad, Z. Fan, H. Pearce, B. Tan, R. Karri, B. Dolan-Gavitt, and S. Garg, \"Benchmarking large language models for automated verilog RTL code generation,\" in 2023 Design, Automation & Test in Europe Conference & Exhibition (DATE). IEEE, 2023, pp. 1-6." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.469, + 0.922, + 0.514 + ], + "angle": 0, + "content": "[26] S. Thakur, B. Ahmad, H. Pearce, B. Tan, B. Dolan-Gavitt, R. Karri, and S. 
Garg, \"Verigen: A large language model for verilog code generation,\" ACM Transactions on Design Automation of Electronic Systems, vol. 29, no. 3, pp. 1-31, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.514, + 0.922, + 0.571 + ], + "angle": 0, + "content": "[27] K. Chang, K. Wang, N. Yang, Y. Wang, D. Jin, W. Zhu, Z. Chen, C. Li, H. Yan, Y. Zhou et al., \"Data is all you need: Finetuning llms for chip design via an automated design-data augmentation framework,\" in Proceedings of the 61st ACM/IEEE Design Automation Conference, 2024, pp. 1-6." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.571, + 0.922, + 0.605 + ], + "angle": 0, + "content": "[28] E. J. Hu, Y. Shen, P. Wallis, Z. Allen-Zhu, Y. Li, S. Wang, L. Wang, W. Chen et al., “Lora: Low-rank adaptation of large language models.” *ICLR*, vol. 1, no. 2, p. 3, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.605, + 0.922, + 0.661 + ], + "angle": 0, + "content": "[29] M. Liu, Y.-D. Tsai, W. Zhou, and H. Ren, \"Craftrtl: High-quality synthetic data generation for verilog code models with correct-by-construction non-textual representations and targeted code repair,\" ArXiv, vol. abs/2409.12993, 2024. [Online]. Available: https://api_semanticscholar.org/CorpusID:272770433" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.661, + 0.922, + 0.695 + ], + "angle": 0, + "content": "[30] Y. Zhang, Z. Yu, Y. Fu, C. Wan, and Y. C. Lin, \"Mg-verilog: Multi-grained dataset towards enhanced llm-assisted verilog generation,\" in 2024 IEEE LLM Aided Design Workshop (LAD). IEEE, 2024, pp. 1-5." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.695, + 0.922, + 0.729 + ], + "angle": 0, + "content": "[31] E. Goh, M. Xiang, I. Wey, T. H. Teo et al., “From english to asi: Hardware implementation with large language model,” arXiv preprint arXiv:2403.07039, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.729, + 0.922, + 0.774 + ], + "angle": 0, + "content": "[32] S. 
Liu, Y. Lu, W. Fang, M. Li, and Z. Xie, \"Openllm-rtl: Open dataset and benchmark for llm-aided design rtl generation,\" in Proceedings of the 43rd IEEE/ACM International Conference on Computer-Aided Design, 2024, pp. 1-9." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.774, + 0.922, + 0.82 + ], + "angle": 0, + "content": "[33] C. Deng, Y.-D. Tsai, G.-T. Liu, Z. Yu, and H. Ren, \"Scalertl: Scaling llms with reasoning data and test-time compute for accurate rtl code generation,\" ArXiv, vol. abs/2506.05566, 2025. [Online]. Available: https://api-semanticscholar.org/CorpusID:279243692" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.82, + 0.922, + 0.865 + ], + "angle": 0, + "content": "[34] Y. Liu, C. Xu, Y. Zhou, Z. Li, and Q. Xu, \"Deeprl: Bridging verilog understanding and generation with a unified representation model,\" ArXiv, vol. abs/2502.15832, 2025. [Online]. Available: https://api-semanticscholar.org/CorpusID:276574886" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.865, + 0.922, + 0.911 + ], + "angle": 0, + "content": "[35] N. Wang, B. Yao, J. Zhou, X. Wang, Z. Jiang, and N. Guan, \"Large language model for verilog generation with golden code feedback,\" ArXiv, vol. abs/2407.18271, 2024. [Online]. Available: https://api_semanticscholar.org/CorpusID:271516462" + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.91, + 0.922, + 0.944 + ], + "angle": 0, + "content": "[36] M. Gao, J. Zhao, Z. Lin, W. Ding, X. Hou, Y. Feng, C. Li, and M. 
Guo, \"Autovcoder: A systematic framework for automated verilog code generation using llms,\" 2024 IEEE 42nd International Conference" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.071, + 0.922, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "page_number", + "bbox": [ + 0.908, + 0.032, + 0.921, + 0.041 + ], + "angle": 0, + "content": "10" + }, + { + "type": "ref_text", + "bbox": [ + 0.106, + 0.072, + 0.492, + 0.095 + ], + "angle": 0, + "content": "on Computer Design (ICCD), pp. 162-169, 2024. [Online]. Available: https://api(semanticscholar.org/CorpusID:271516210" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.096, + 0.493, + 0.152 + ], + "angle": 0, + "content": "[37] Y. Zhao, D. Huang, C. Li, P. Jin, Z. Nan, T. Ma, L. Qi, Y. Pan, Z. Zhang, R. Zhang, X. Zhang, Z. Du, Q. Guo, X. Hu, and Y. Chen, \"Codev: Empowering llms withhdl generation through multi-level summarization,\" 2024. [Online]. Available: https://api_semanticscholar.org/CorpusID:271212791" + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.152, + 0.493, + 0.196 + ], + "angle": 0, + "content": "[38] D. Mendoza, C. Hahn, and C. Trippel, \"Translating natural language to temporal logics with large language models and model checkers,\" in 2024 Formal Methods in Computer-Aided Design (FMCAD), 2024, pp. 1-11." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.197, + 0.492, + 0.231 + ], + "angle": 0, + "content": "[39] P. Aggarwal, B. Parno, and S. Welleck, \"Alphaverus: Bootstrapping formally verified code generation through self-improving translation and treefinement,\" arXiv preprint arXiv:2412.06176, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.231, + 0.492, + 0.265 + ], + "angle": 0, + "content": "[40] C. E. Jimenez, J. Yang, A. Wettig, S. Yao, K. Pei, O. Press, and K. Narasimhan, \"Swe-bench: Can language models resolve real-world github issues?\" arXiv preprint arXiv:2310.06770, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.265, + 0.492, + 0.31 + ], + "angle": 0, + "content": "[41] T. Suresh, R. G. Reddy, Y. Xu, Z. Nussbaum, A. Mulyar, B. Duderstadt, and H. Ji, \"Cornstack: High-quality contrastive data for better code retrieval and reranking,\" in The Thirteenth International Conference on Learning Representations, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.31, + 0.492, + 0.344 + ], + "angle": 0, + "content": "[42] N. Jain, M. Shetty, T. Zhang, K. Han, K. Sen, and I. Stoica, “R2e: Turning any github repository into a programming agent environment,” in ICML, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.344, + 0.492, + 0.378 + ], + "angle": 0, + "content": "[43] P. Wu, N. Guo, J. Lv, X. Xiao, and X. Ye, \"RtlrepEncoder: Repository-level rtl code completion through the combination of fine-tuning and retrieval augmentation,\" arXiv preprint arXiv:2504.08862, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.378, + 0.492, + 0.422 + ], + "angle": 0, + "content": "[44] Z. Li, C. Xu, Z. Shi, Z. Peng, Y. Liu, Y. Zhou, L. Zhou, C. Ma, J. Zhong, X. Wang et al., \"Deepcircuits: A comprehensive repository-level dataset for rtl code understanding, generation, and ppa analysis,\" arXiv preprint arXiv:2502.18297, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.422, + 0.492, + 0.457 + ], + "angle": 0, + "content": "[45] N. Wang, B. Yao, J. Zhou, X. Wang, Z. Jiang, and N. Guan, “Large language model for verilog generation with golden code feedback,” arXiv preprint arXiv:2407.18271, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.078, + 0.457, + 0.492, + 0.491 + ], + "angle": 0, + "content": "[46] J. Wang, Z. Zhang, Y. He, Y. Song, T. Shi, Y. Li, H. Xu, K. Wu, G. Qian, Q. Chen et al., “Enhancing code llms with reinforcement learning in code generation,” arXiv preprint arXiv:2412.20367, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.078, + 0.072, + 0.493, + 0.491 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15659/c7f88426-e408-4745-97f1-882178397313_origin.pdf b/data/2025/2504_15xxx/2504.15659/c7f88426-e408-4745-97f1-882178397313_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..aca49f8b0ddd541d2745eb57aa96fdfae4753736 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15659/c7f88426-e408-4745-97f1-882178397313_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:946cff9bbd1ad9ada2aaf77f0e2e107b2bee428c1f0a437b1131407ba8250c9f +size 439160 diff --git a/data/2025/2504_15xxx/2504.15659/full.md b/data/2025/2504_15xxx/2504.15659/full.md new file mode 100644 index 0000000000000000000000000000000000000000..82448543fa21b6fdbe7dc99a7346bd36e49c5fe5 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15659/full.md @@ -0,0 +1,388 @@ +# VeriCoder: Enhancing LLM-Based RTL Code Generation through Functional Correctness Validation + +Anjiang Wei $①$ , Huanmi Tan $①$ , Tarun Suresh $①$ , Daniel Mendoza $①$ , Thiago S. F. X. Teixeira $①$ , Ke Wang $①$ , Caroline Trippel $①$ , and Alex Aiken $①$ + +Abstract—Recent advances in Large Language Models (LLMs) have sparked growing interest in applying them to Electronic Design Automation (EDA) tasks, particularly Register Transfer Level (RTL) code generation. While several RTL datasets have been introduced, most focus on syntactic validity rather than functional validation with tests, leading to training examples that compile but may not implement the intended behavior. We present VERICODER, a model for RTL code generation fine-tuned on a dataset validated for functional correctness. This fine-tuning dataset is constructed using a novel methodology that combines unit test generation with feedback-directed refinement. 
Given a natural language specification and an initial RTL design, we prompt a teacher model (GPT-4o-mini) to generate unit tests and iteratively revise the RTL design based on its simulation results using the generated tests. If necessary, the teacher model also updates the tests to ensure they comply with the natural language specification. As a result of this process, every example in our dataset is functionally validated, consisting of a natural language description, an RTL implementation, and passing tests. Fine-tuned on this dataset of 125,777 examples, VERICODER achieves state-of-the-art metrics in functional correctness on VerilogEval and RTLLM, with relative gains of up to $71.7\%$ and $27.4\%$ , respectively. An ablation study further shows that models trained on our functionally validated dataset outperform those trained on functionally non-validated datasets, underscoring the importance of high-quality datasets in RTL code generation. Our code, data, and models are publicly available at https://github.com/Anjiang-Wei/VeriCoder + +Index Terms—RTL, Code Generation, Large Language Model. + +# I. INTRODUCTION + +Large Language Models (LLMs) have demonstrated remarkable performance across natural language processing tasks, spurring growing interest in applying their capabilities to a broad range of Electronic Design Automation (EDA) problems [1]–[4]. Recent efforts explore LLMs for code generation [5]–[12], architecture design [13]–[15], verification [16], [17], tool assistance [18], [19], and debugging [1], [20]. In this work, we focus on generating Register Transfer Level (RTL) code from natural language specifications. Automating RTL code generation has the potential to significantly boost hardware design productivity and reduce the manual effort + +Anjiang Wei, Daniel Mendoza, Caroline Trippel, and Alex Aiken are affiliated with Stanford University (e-mail: anjiang@cs.stanford.edu; dmendo@stanford.edu; trippel@stanford.edu; aiken@cs.stanford.edu). 
+ +Huanmi Tan is affiliated with Carnegie Mellon University (e-mail: huanmi.tan@gmail.com). + +Tarun Suresh is affiliated with University of Illinois Urbana-Champaign (e-mail: tsuresh3@illinois.edu). + +Thiago S. F. X. Teixeira is with Intel Corporation (e-mail: thiago.teixeira@intel.com). + +Ke Wang is with Nanjing University (e-mail: kwg@nju.edu.cn). + +involved in complex design tasks, making it a timely and impactful area of research. + +Developing open-source, lightweight models for RTL code generation is essential for advancing both research and deployment. Proprietary models such as GPT-4o and Claude 3.7 restrict customization and lack transparency, making them unsuitable for in-depth analysis and academic exploration. They also raise privacy and security concerns, especially when handling RTL designs that may contain sensitive intellectual property. In contrast, lightweight models that can run locally offer a secure, privacy-preserving alternative—enabling hardware engineers to integrate AI directly into their design workflows. However, existing open-source models still underperform on RTL tasks, largely due to the absence of high-quality, functionally validated RTL datasets in their training corpora [21], [22]. While training algorithms are readily available, progress is bottlenecked by the lack of open datasets with functional correctness validation. + +A key challenge in building such datasets lies in constructing large-scale, high-quality training data that pairs natural language specifications with RTL implementations. Despite efforts to mine RTL code from open-source repositories [23]–[26], much of the collected data lacks validation and may not align with its intended functionality. To address this, recent work has turned to LLMs—either prompting them to synthesize RTL designs from keyword-based specifications [6], [7] or leveraging them to rewrite existing RTL code and generate matching specifications [8], [24], [26]. 
In both cases, syntax checkers are often employed to filter uncompilable code or provide feedback for iterative refinement, but these techniques still fall short of validating functional correctness. + +As far as we know, all these prior work [6]–[8], [24], [26] have focused solely on ensuring syntactic correctness, overlooking functional correctness. As a result, many dataset examples compile successfully but may not implement the behavior described in their natural language specifications. The distinction between syntactic correctness and functional correctness has important implications for model evaluation and real-world deployment. While functionally correct code inherently satisfies syntax constraints, syntactic correctness alone does not guarantee correct functionality. This gap is evident in the results reported by the RTLLM benchmark [10], where GPT-4o attains a high syntax accuracy of $100.0\%$ , yet achieve only $69.0\%$ in terms of functional correctness. Ultimately, in real-world settings, it is functional correctness rather than syntactic validity that truly matters. + +In this work, we introduce VeriCoder, a model for RTL code generation fine-tuned on a high-quality dataset consisting of 125,777 examples that has been validated for functional + +
Prior WorkStrategyDescriptionSyntax CheckerUnit Tests
RTLCoder [7]Keyword-based Generation, MutationPrompt LLM with keywords and existing code, followed by iterative mutation to get instruction-code pairs.X
OriGen [8]Code-to-Code, Syntax Error CorrectionApplies LLM-driven code-to-code pipeline on existing RTL code and filters them by compiler error feedback.X
BetterV [24]Web Scraping & Cleaning, Alignment with CLarge-scale web-collected Verilog, cleaned and filtered to enforce coding standards; aligns C with Verilog.X
VeriGen [26]Manually Collect Textbook and Open-Source CodeMines real-world RTL from GitHub and textbooks, manually cleans and organizes them into a structured dataset.X
ChipGPT [27]AST-based SynthesisConverts Verilog ASTs into natural-language prompts and injects semantic error variants via EDA-tool feedback.X
VeriCoder (Our Work)Feedback-Directed Refinement, Simulation, Unit Test GenerationIteratively generate unit tests with a teacher LLM, check implementations via compiler and simulator, and refine designs and tests until each design passes.
+ +TABLE I: Comparison of Verilog fine-tuning dataset construction approaches. + +correctness1. To construct this dataset, we develop a novel pipeline that combines unit test generation with feedback-directed refinement guided by a teacher LLM (GPT-4o-mini). Given a natural language specification and an initial RTL implementation, the teacher model first generates a unit test. If the RTL code fails the simulation, the model iteratively revises the design based on the observed error messages. When needed, the unit test is also updated to better reflect the intended functionality described by the specification. This process continues until the design passes simulation or a retry limit is reached. The resulting fine-tuning dataset consists of 125,777 validated triples: a natural language specification, a correct RTL design, and a self-checking unit test. + +We fine-tune VeriCoder from Qwen2.5-14B-Instruct using our curated dataset and evaluate it on two established RTL code generation benchmarks: VerilogEval [9] and RTLLM [10]. VeriCoder achieves new state-of-the-art performance, achieving up to $71.7\%$ and $27.4\%$ relative gains in the pass@k metric over the previous best fine-tuned model OriGen [8]. + +We conduct an ablation study demonstrating that models trained on our functionally validated dataset outperform those trained on non-validated data, under the same base model and training setup. These results highlight the importance of high-quality, functionally validated datasets for RTL code generation. + +Our contributions are as follows: + +- We introduce VeriCoder, an RTL code generation model fine-tuned on a dataset validated for functional correctness. On the VerilogEval and RTLLM benchmarks, VeriCoder achieves state-of-the-art performance among open-source fine-tuned models, yielding relative pass@k gains of up to $71.7\%$ and $27.4\%$ over the prior best. 
+- We develop a dataset augmentation pipeline that combines unit test generation with feedback-directed refinement guided by a teacher LLM. This yields, to the best of our knowledge, the largest fine-tuning dataset to date with functional validation, consisting of 125,777 validated + +1While functional correctness is not fully guaranteed, we manually reviewed 100 randomly sampled examples and found that $92\%$ of the generated RTL code correctly matches the corresponding natural language descriptions. + +triples of natural language specifications, RTL designs, and passing tests. + +- We conduct an ablation study showing that functional validation during dataset construction improves model performance, underscoring the importance of using high-quality functionally validated datasets for RTL code generation. + +# II. BACKGROUND AND RELATED WORK + +# A. Language Modeling and Fine-Tuning + +Large Language Models (LLMs) are deep neural networks trained to perform language modeling, a task where the model learns to predict the next token in a sequence. Formally, given a sequence of tokens $x = (x_{1}, x_{2}, \ldots, x_{T})$ , the training objective is to maximize the log-likelihood: + +$$ +\mathcal {L} _ {\mathrm {L M}} = \sum_ {t = 1} ^ {T} \log P \left(x _ {t} \mid x _ {< t}; \theta\right), \tag {1} +$$ + +where $\theta$ denotes the model parameters and $x_{< t} = (x_1, \ldots, x_{t-1})$ represents the context tokens. This autoregressive objective enables the model to generate coherent text and capture long-range dependencies across various domains. + +The training of LLMs is typically organized into two stages: + +- Pre-training: The model is trained on massive, diverse corpora (e.g., web data, books, source code) to acquire broad knowledge and language understanding. This stage is expensive and performed once per model. +- Post-training: The pre-trained model is adapted to specific tasks using smaller, curated datasets. 
This stage includes supervised fine-tuning (SFT), where the model is trained on task-specific input-output pairs. + +Since post-training large models from scratch is resource-intensive, researchers have developed parameter-efficient fin-tuning methods. One widely used approach is Low-Rank Adaptation (LoRA) [28]. Instead of updating the full weight matrices $W \in \mathbb{R}^{d \times k}$ in each linear layer, LoRA freezes the original weights and introduces a trainable low-rank update: + +![](images/5c1e7c992b49adf8de5ea90499d94e6e076d00b68a9c604449489cfdfdbc9a13.jpg) +Fig. 1: LLM-guided dataset augmentation overview. + +![](images/36bafefd15fa00a56e86e549967356d7fadc2ba905d753cdbfae75fdbec5b723.jpg) +(a) Natural language specification taken from the Origen [8] dataset. + +![](images/ca37e8cb215359fe9ab488e17022939fb18c1f5bc9ddee7a7e9156793fa8f592.jpg) +(b) Buggy design taken from the Origen [8] dataset. It times out on the generated test shown in Figure 3. +Fig. 2: Natural language specification (left) and the corresponding buggy and corrected Verilog designs (middle and right). The specification and buggy design are from the original dataset [8], which lacks tests, while the test (Figure 3) and corrected design are generated by a teacher model (GPT-4o-mini) and included in our validated dataset. + +![](images/4a5ec9cc51cb0015f2ab1c778411473f2c44ef14e45f1231bc3002fe848b6dfa.jpg) +(c) Correct design fixed by the teacher model that passes the generated test in Figure 3. + +$$ +W ^ {\prime} = W + \Delta W = W + A B, \tag {2} +$$ + +where $A \in \mathbb{R}^{d \times r}$ and $B \in \mathbb{R}^{r \times k}$ , and $r \ll \min(d, k)$ . Only $A$ and $B$ are updated during training, while $W$ remains unchanged. This technique reduces both memory and compute overhead during adaptation, making it feasible to specialize large LLMs to domain-specific applications, such as RTL generation, with limited computational resources. + +# B. 
Related Work on RTL Code Generation + +Progress on open-source RTL code generation is limited by the absence of large-scale, high-quality datasets. To mitigate this, recent efforts have focused on automated data mining and augmentation techniques to enrich existing corpora of RTL examples. Table I presents the comparison of different strategies for constructing fine-tuning datasets. + +Mining open-source RTL designs is a common strategy for dataset construction. VeriGen [26] compiles Verilog modules from GitHub and textbooks into a structured corpus using automated syntax checks. BetterV [24] collects Verilog modules from the internet and then filters designs based on coding style + +and syntactic validity. CraftRTL [29] augments fine-tuning data with non-textual code representations, injecting synthetic errors derived from intermediate model checkpoints into open-source Verilog code. Other works [8], [30], [31] adopt similar methodologies for sourcing and preprocessing RTL code. + +Another line of work leverages a commercial LLM for synthetic data generation. RTLCoder [6] prompts GPT-3.5 with domain keywords to generate both task descriptions and corresponding RTL, discarding any outputs that fail to compile. OriGen [8] further employs Claude 3.5 in a two-stage code-to-code pipeline: first turning mined RTL code into natural language specifications, then regenerating code from these specifications under compiler guidance, combining the strengths of real-world examples and synthetic generation. ChipGPT [27] transforms Verilog ASTs into natural language specifications. + +While most of the existing work listed in Table I ensures syntax validity, none of them has any evidence of functional correctness. Without comprehensive unit tests or simulation-based feedback during dataset construction, models fine-tuned on these corpora may produce code that compiles but still fails to meet the intended natural language specification. 
+ +A recent work, OpenLLM-RTL [32], explores the idea of + +```verilog +`timescale 1ns/1ps +module tb_and3; +reg a = 0, b = 0, c = 0; +wire y; +// Instantiate the DUT (Design Under Test) +and3 uut (.a(a), .b(b), .c(c), .y(y)); +initial begin +// Wait for signals to settle +#1; +// Set all inputs to 1; expected y = 1 +{a, b, c} = 3'b111; +#1; +// Check output, report error if incorrect +if (y != 1'B1) +$fatal(1, "FAIL: y=%b (expected 1)", y); +$display("PASS"); +$finish; +end +endmodule +``` + +Fig. 3: Unit test for the and3 module. The buggy design (Figure 2b) times out on this test, while the corrected design (Figure 2c) passes successfully. The test is generated by the teacher model GPT-4o-mini using the prompt in Figure 4a, and is used to validate and augment the original dataset, which contains no tests. + +using LLMs to generate assertions, producing a functionally verified dataset of 7k examples. While sharing the same goal of improving functional correctness in fine-tuning datasets, our work takes a different approach by generating unit tests for validation. Our final dataset contains over 125,777 examples—the largest functionally validated RTL dataset to date. + +Beyond data collection and synthesis techniques, several works explore other methods to enhance RTL code generation quality. ScaleRTL [33] emphasizes reasoning by generating intermediate traces and leveraging test-time compute through iterative self-reflection. DeepRTL [34] adopts curriculum learning guided by multi-level natural language summaries. VeriSeek [35] applies reinforcement learning with feedback derived from AST-level similarity between LLM outputs and reference designs. AutoVCoder [36] incorporates retrieval-augmented generation (RAG), dynamically supplying relevant Verilog snippets to the model. CodeV [37] extends generation capabilities to tasks such as fill-in-the-middle (FIM). 
Our work adopts standard supervised fine-tuning while focusing on constructing a large-scale, functionally validated dataset. Our approach is complementary and orthogonal to existing techniques. + +# III. METHODOLOGY + +# A. Overview + +We aim to improve the quality of fine-tuning datasets consisting of natural language specifications paired with syntactically correct Verilog designs, as seen in prior work [6]–[8], [24], [26]. These datasets, including Origen [8], contain + +Verilog designs that pass syntax checks but are not validated against unit tests to ensure functional correctness. To address this limitation, we introduce an automated dataset augmentation pipeline that leverages a teacher language model, e.g., GPT-40-mini, to validate each example through iterative refinement. As illustrated in Figure 1, given a natural language specification and an initial RTL design, the teacher model first generates a unit test. If the RTL design fails the simulation, the model iteratively revises the design based on the error message. When needed, it also updates the unit test to better align with the natural language specification. Although our experiments focus on augmenting the Origen dataset due to its size and quality, the proposed methodology is broadly applicable to any dataset lacking test validation. + +The pipeline begins with the original dataset $D = \{(\text{specification}, \text{design})\}$ , where each RTL design is intended to implement a corresponding natural language specification. However, because no tests are provided, there is no evidence that the designs exhibit the intended functional behavior. For each pair, we prompt the teacher model, GPT-4o-mini, to generate a unit test for the design. The test is compiled and simulated with the design to check for correctness, where correctness means the design passes the simulation test. 
+ +If the simulation fails, we extract the resulting error message and re-invoke the teacher model using a refinement prompt. This prompt includes the specification, the current design and test, and the error message. The model attempts to resolve the failure by making minimal modifications to the design, the test, or both. This refinement process repeats iteratively: each candidate is re-simulated, and the cycle continues until the design passes the test or a maximum number of attempts is reached. + +The final output is a validated dataset $D' = \{(\text{specification}, \text{design}, \text{test})\}$ , where each triplet contains a natural language specification, a Verilog design, and unit tests. A concrete motivating example is shown in Section III-B, and the details of the algorithm and prompts are provided in Section III-C. + +# B. Motivating Example + +Figure 2 presents a motivating example taken directly from the Origen dataset [8], highlighting a key limitation of datasets that rely only on syntax checks for validation. Prior work in RTL generation typically assumes that syntactic correctness is sufficient for fine-tuning, without verifying functionality through unit tests. This example demonstrates that a design can compile without errors yet fail to implement the intended behavior. It also illustrates how our method can automatically detect and correct such issues through test generation and iterative refinement. + +This example includes a natural language specification (Figure 2a), a buggy RTL design from the original dataset (Figure 2b), and a corrected design produced by our pipeline (Figure 2c). The specification describes a simple combinational module, and3, which computes the bitwise AND of three one-bit inputs: a, b, and c. + +The original design, though syntactically valid, is functionally incorrect due to several semantic issues. First, it misuses non- + +# Prompt Template + +System Prompt You are a Verilog design and testing expert. 
Given a hardware specification described in natural language, your job is to generate both a correct Verilog module and a corresponding unit test that checks its functionality through simulation. + +# User Prompt + +- Natural Language Specification: {NL Spec} +- Initial Implementation: {design} +- Your task: + +1) Provide the unit tests for the given design. +2) Revise the Verilog implementation if the original design fails to pass your test cases. +3) Follow good coding practices, such as using meaningful comments to document key logic and decision points. +4) Use $fatal(1, "msg") to flag incorrect behavior. +5) Output format: {"design": "...", "test": "..."} + +(a) Prompt for generating a Verilog module's corresponding test + +# Prompt Template + +System Prompt You are a Verilog design and testing expert. Analyze a failing design and its test, and make minimal yet sufficient edits to correct the issue while preserving the intended behavior specified in natural language. + +# User Prompt + +- Natural Language Specification: {NL Spec} +- Previous Design and Test: {design}, {test} +- Simulation Output: {error message} +- Your task: + +1) Carefully identify the root cause of the failure by analyzing the code and the error message. +2) Make changes to either the design or the test (or both) to resolve the issue while maintaining correctness. +3) Output format: {"explanation": "...", "design": "...", "test": "..."} + +(b) Prompt for refining a failing Verilog design and test + +Fig. 4: Prompt templates provided to the teacher model for automated Verilog test generation and refinement, ensuring that the final design passes the generated test and matches the original natural language specification. + +blocking assignments $(<=)$ inside a combinational always @\* block, which can lead to counterintuitive synthesis results. 
Second, if instead used inside a sequential block, the sequence of non-blocking assignments in the design-y <= a, then y <= y & c, and finally y <= y & b—does not correctly compute and store in y the bitwise AND of a, b, and c. In particular, non-blocking assignments defer updates until the end of the current timestep, meaning that all assignments operate on the same initial value of y, and only the final assignment takes effect. Finally, if the non-blocking assignments were replaced with blocking ones, the code would introduce a combinational feedback loop, which cannot stabilize. + +These types of errors occur because the RTL code in prior datasets, including Origen [8], is synthetically generated by teacher LLMs such as Claude 3.5 and filtered only through syntax checks. Without simulation or test-based validation, semantic bugs that affect functional correctness remain undetected. + +We provide the natural language specification and the buggy RTL design to the teacher model GPT-4o-mini, prompting it to generate a unit test using the template shown in Figure 4a (further detailed in Section III-C). The resulting test is shown in Figure 3, which sets all three inputs to 1 and checks whether the output y evaluates to 1 as expected. When the buggy design (Figure 2b) is simulated with this test, it hangs and ultimately times out. The bug exemplifies a combinational loop. The always @* block is meant for combinational logic and its evaluation is triggered upon changes to any of the variables read inside the block. In this case, an evaluation of the block is triggered when either y, a, b, or c changes. However, y is both read (on the RHS) and written (on the LHS) in the same block. Upon evaluating the block, it schedules an update to y, which causes a change to y. This change retriggers the block, leading to another scheduled update to y, and so on. This loop continues indefinitely, preventing the simulation from converging. 
+ +The corrected version replaces the non-blocking assignments with a single blocking assignment $(=)$ , ensuring that $y$ is updated + +Algorithm 1 Dataset Augmentation with a Teacher LLM +Input: Original dataset $D = \{(s_i,d_i)\}_{i = 1}^N$ $\triangleright s_i$ : NL specification; $d_{i}$ : RTL design Maximum attempts $T$ +Define: GenTestTpl $\leftarrow$ prompt template for test generation RefineTpl $\leftarrow$ prompt template for iterative refinement +Output: Augmented dataset $D^{\prime} = \{(s_{i},d_{i},t_{i})\}_{i = 1}^{M}$ $\triangleright t_i$ : Generated unit test +1: $D^{\prime}\gets \emptyset$ +2: for each $(s,d)\in D$ do +3: attempt $\leftarrow 0$ success $\leftarrow$ false +4: while attempt $< T$ ∧ ¬success do +5: attempt $\leftarrow$ attempt + 1 +6: if attempt $= = 1$ then +7: d,t $\leftarrow$ LLMInvoke(GenTestTpl,s,d) +8: else +9: d,t $\leftarrow$ LLMInvoke(RefineTpl,s,d,t,err) +10: success, err $\leftarrow$ RunVerilogTest(d,t) +11: if success then +12: $D^{\prime}\gets D^{\prime}\cup \{(s,d,t)\}$ +13: return $D^{\prime}$ + +immediately with the result of a & b & c, as required by the specification. This version passes the test generated by the teacher model and behaves correctly under simulation. + +This example underscores the importance of functional validation in RTL datasets. Syntax checks alone cannot catch subtle but critical semantic errors. Our methodology, through teacher-driven test generation and iterative refinement, ensures that each design in the augmented dataset is not only syntactically valid but also functionally validated with unit tests. + +# C. Algorithm and Prompts + +Algorithm 1 presents our automated pipeline for transforming an unvalidated RTL dataset into a functionally validated one. 
Starting from a dataset $D = \{(s_i, d_i)\}_{i=1}^N$ , where each + +example consists of a natural language specification $s_i$ and a corresponding RTL design $d_i$ (e.g., from Origen [8]), the goal is to generate a unit test $t_i$ that validates the functional correctness of the design. If the design fails to pass the test, we invoke an iterative refinement loop that updates the design and test until it passes or a maximum number of attempts $T$ is reached. We set $T = 5$ in our experiments. + +The procedure is powered by a teacher model, GPT-4o-mini, which corresponds to the LLMInvoke calls in Algorithm 1. While stronger models such as GPT-4o or o3-mini may yield better performance, we use GPT-4o-mini in practice because of the large size of the dataset (217,462 examples in Origen) and the high cost associated with repeated API queries to OpenAI models. + +The process begins by prompting the teacher model with the test generation template (Figure 4a), together with a natural language specification and its initial RTL design (e.g., Figure 2a and Figure 2b). The model then produces a candidate unit test (e.g., Figure 3) designed to check whether the design satisfies the intended functionality under simulation. + +The design and test are compiled and simulated using standard Verilog tooling. If the test fails, for example due to a timeout, incorrect output, or another runtime error, we construct a refinement prompt (Figure 4b) that includes the specification, the failing design and test, and the simulation error message (corresponding to the err variable in Algorithm 1). This prompt is then passed to the teacher model, which attempts to fix the issue by making edits to the design, the test, or both. + +The refinement process repeats until the updated design passes simulation or the maximum number of attempts $T$ is reached. Once a design successfully passes its test, the validated triple $(s_i, d_i, t_i)$ is added to the output dataset $D'$ . 
+ +This strategy enables systematic detection and correction of subtle RTL bugs that cannot be identified through syntax checks alone. By integrating LLM-based test generation and iterative refinement into the dataset construction pipeline, we produce a dataset that is not only syntactically valid but also functionally validated through simulation. + +While functional correctness under all possible inputs cannot be guaranteed, the inclusion of unit tests makes our augmented dataset substantially more robust than prior approaches that rely solely on syntactic checking. We view this as a practical and scalable step toward building higher-quality fine-tuning datasets for RTL generation. To assess quality, we manually reviewed 100 randomly sampled examples and found that $92\%$ of the generated RTL code correctly matched the corresponding natural language descriptions. + +# IV. EXPERIMENTAL SETUP + +# A. Dataset + +Following the methodology described in Section III, we construct a fine-tuning dataset comprising 125,777 examples. Each example includes a natural language specification, a corresponding RTL design, and associated unit tests. Table II summarizes key statistics: the specifications contain an average of 247 words (ranging from 116 to 549), RTL implementations average 35 lines of code (ranging from 5 to 225), and unit + +
CategoryCountLength
MinMaxAvg
NL specification (words)116549247
Design (lines of RTL)125,777522535
Unit tests (lines of RTL)619755
+ +TABLE II: Dataset statistics: total number of examples and length distributions for natural language specifications, RTL implementations, and unit tests in the VeriCoder dataset. + +tests average 55 lines (ranging from 6 to 197). We use the specification-solution pairs from this dataset to train our model, VeriCoder. + +# B. LoRA Fine-Tuning Setup + +Following standard practices for LLM fine-tuning, we fine-tune the base model of Qwen2.5-14B-Instruct using Low-Rank Adaptation (LoRA, described in Section II-A), with a rank of 16 and a scaling factor of 32 to all linear projection layers in the transformer. Training is conducted over 3 epochs with a batch size of 40. We adopt a constant learning rate of $1 \times 10^{-5}$ , paired with a linear decay scheduler and a warm-up ratio of 0.05. The optimizer is used with a weight decay of $1 \times 10^{-4}$ and gradient clipping is applied with a maximum norm of 1. + +# C. Benchmarks and Metrics + +Following the evaluation protocol established in prior work [7], [8], we benchmark against VerilogEval [9] and RTLLM [10]. For VerilogEval, we report the standard Pass@k metric with $k \in \{1,5,10\}$ , which estimates the expected probability that at least one of the top- $k$ generated programs passes all test cases. The metric is defined as: + +$$ +\operatorname {P a s s} @ k = \mathbb {E} \left[ 1 - \frac {\binom {n - c} {k}}{\binom {n} {k}} \right] +$$ + +where $n$ is the total number of generated programs and $c$ is the number of correct ones. All test cases are manually created by experts who design the benchmarks. In all evaluations, we set $n = 10$ . For RTLLM, we report both syntax correctness and functional correctness using Pass@5. This evaluation setup aligns with that used in prior work [8]. + +# D. Models for Evaluation + +We evaluate two groups of models. 
The first group consists of pretrained-only base models, including OpenAI's latest releases (o4-mini, o3-mini, GPT-4o, GPT-4o-mini), Google's Gemini 2.0 Flash, DeepSeek's R1 and DeepSeek-Coder-7B-v1.5 (the base model used in prior work [8]), Meta's LLaMA2-7B model, and Alibaba's Qwen2.5-14B-Instruct (our base model for fine-tuning). The second group includes fine-tuned models with released weights from prior work: Origen [8], RTLCoder [6], and ChipGPT [27]. + +To ensure a fair comparison, we use identical input prompts and post-processing scripts across all models. For models released by prior work, we do not adopt their model-specific + +
Model TypeEvaluated ModelVerilogEval V1.0 [9] +(using pass@k metric)RTLLM V1.1 [10] +(using pass@5 metric)
Eval-Machine (%)Eval-Human (%)Syntax-VCS (%)Functional (%)
k=1k=5k=10k=1k=5k=10
Base Modelso4-mini-2025-04-1661.967.868.664.366.467.186.272.4
GPT-4o-2024-11-2063.766.567.154.360.462.2100.069.0
GPT-4o-mini-2024-07-1855.762.464.344.751.655.189.765.5
DeepSeek-R165.770.972.062.869.169.979.358.6
o3-mini-2025-01-3166.471.672.062.068.969.969.055.2
Qwen2.5-14B-Instruct47.854.255.235.340.042.369.041.4
Gemini-2.0-flash-00160.362.663.652.157.659.065.534.5
DeepSeek-R1-Distill-Qwen-14B46.264.168.536.751.755.162.134.5
DeepSeek-Coder-7B-v1.544.458.962.925.840.244.948.324.1
LLaMA-2-7B7.015.618.90.42.13.83.40.0
Fine-Tuned Models +(Prior Work)OriGen [8]35.965.168.522.347.551.951.737.9
RTLCoder-DeepSeek [6]22.051.457.314.735.242.317.210.3
RTLCoder-Mistral [6]17.646.456.612.431.536.53.40.0
ChipGPT-LLaMA3.1-8B-SFT [27]17.646.456.612.431.536.513.80.0
ChipGPT-LLaMA2-SFT-7B [27]0.94.27.70.62.23.86.90.0
Our WorkVeriCoder55.762.964.338.349.251.979.348.3
+ +TABLE III: RTL code generation performance across models. To ensure a fair comparison, we use the same input prompts and apply identical post-processing scripts, running inference with model weights released by prior work. + +prompts [8] or inference pipelines [6], [27]. Instead, we apply a uniform evaluation script, with the only variable being the model under test. This standardization is critical, as both input formatting and post-processing can significantly affect performance. By controlling these factors, we isolate model capability and enable a fair comparison. + +# V. RESULTS + +# A. Main Evaluation Results + +Table III shows the results. Our major findings are as follows: + +a) Comparison with prior work: VeriCoder achieves state-of-the-art results across two RTL code generation benchmarks, outperforming all previously released open-source finetuned models. On VerilogEval-Machine, VeriCoder attains a pass@1 accuracy of $55.7\%$ , representing a 19.8 percentage point improvement over the best prior model, OriGen. On VerilogEval-Human, it reaches $38.3\%$ , exceeding OriGen by 16.0 percentage points. Across all evaluated $k$ -shot settings $(k = 1, 5, 10)$ , VeriCoder consistently maintains its lead on the Human split. On the RTLLM benchmark, VeriCoder achieves $79.3\%$ syntax correctness and $48.3\%$ functional correctness, surpassing OriGen's $51.7\%$ and $37.9\%$ , respectively. In conclusion, VeriCoder delivers relative improvements of up to $71.7\%$ on VerilogEval and $27.4\%$ on RTLLM in pass@k accuracy, surpassing the previous state-of-the-art model on both benchmarks. + +To better understand the relatively low performance of ChipGPT [27], we examined its outputs in detail. We observed that its generated RTL designs often include module headers that deviate from the given specifications, revealing difficulty in precise instruction following. 
Moreover, its base model, LLaMA2-7B, performs even worse, suggesting that limitations in the instruction-following capabilities of the underlying pretrained model constrain the effectiveness of the fine-tuned + +variant. For a fair comparison, we do not apply any of the model-specific customized post-processing scripts that attempt to fix syntax or header issues. Instead, we use a standardized evaluation script for all models, extracting Verilog code as-is to ensure consistency. + +b) Effectiveness of our fine-tuning: Starting from Qwen-2.5-14B-Instruct as our base model, VeriCoder delivers substantial gains across VerilogEval. On the VerilogEvalMachine split, pass@1 jumps up by $7.6\%$ , pass@5 by $4.0\%$ , and pass@10 by $2.1\%$ , and VerilogEval-Human reflects the same trend. On RTLLM, functional pass@5 is $7\%$ higher than its base model. Specifically, VeriCoder even marginally outperforms one of the commercial models, Google's Gemini2.0-flash, on pass@5 and pass@10 metrics of Eval-Machine as well as on RTLLM. Together, these results demonstrate that our fine-tuning process and our validated dataset significantly boost pass@k metrics and semantic correctness in RTL generation. + +c) Model gap remains: Despite the observed improvements, a substantial performance gap persists between VeriCoder and the strongest large models. For instance, o3-mini attains $66.4\%$ on VerilogEval Pass@1 compared to VeriCoder's $55.7\%$ . DeepSeek-R1 achieves $69.1\%$ on human-graded Pass@5, versus VeriCoder's $49.2\%$ . Commercial LLMs such as GPT-4o reach a perfect $100.0\%$ Syntax-VCS validity and $69.0\%$ functional correctness, while VeriCoder records $79.3\%$ and $48.3\%$ , respectively. Despite the performance gap, open-source lightweight models offer compelling advantages. 
They provide transparency, allow for local deployment, and ensure intellectual property protection, i.e., capabilities that are particularly important for RTL design workflows where security, customizability, and integration into existing toolchains are critical. + +
Model | VerilogEval [9] (Pass@5) | RTLLM [10] (Pass@5)
Syntax | Func
Qwen2.5-14B-Instruct (base) | 46.8 | 69.0 | 41.4
Qwen w/ unvalidated data | 53.5 | 75.9 | 44.8
Qwen w/ validated data | 55.8 | 79.3 | 48.3
+ +TABLE IV: We performed fine-tuning on the same base model using a functionally validated dataset and the functionally unvalidated dataset [8]. We report Pass@5 metrics for all models on two benchmarks. + +# B. Ablation Study of Dataset + +To assess the impact of dataset quality on RTL code generation, we conduct an ablation study using the same base model, Qwen2.5-14B-Instruct, fine-tuned on two datasets: (1) the unvalidated OriGen dataset from prior work [8], and (2) our newly curated, functionally validated dataset. All factors, including dataset size, fine-tuning hyperparameters, training procedures, and evaluation settings, are held constant to ensure a fair comparison. + +Across all metrics, we observe a consistent improvement as dataset quality increases. On the VerilogEval benchmark (covering both Machine and Human subsets), the base model achieves $46.8\%$ Pass@5. Fine-tuning on the unvalidated dataset raises performance to $53.5\%$ , while our validated dataset further improves it to $55.8\%$ . For RTLLM syntax correctness, the trend is similar: $69.0\%$ for the base model, $75.9\%$ for the unvalidated version, and $79.3\%$ when trained on validated data. Functional correctness sees even more significant improvement, rising from $41.4\%$ (base) to $44.8\%$ (unvalidated) and ultimately to $48.3\%$ (validated). + +These results demonstrate that functionally validated data provides more effective supervision than existing unvalidated data. This also underscores the importance of dataset quality in fine-tuning LLMs for RTL code generation. + +# C. Test Passing Rates of Non-Validated Datasets + +We examine the quality of fine-tuning datasets released by prior work by evaluating their passing rates against our synthetic unit tests generated by the teacher model GPT-4o-mini. For each corpus, we randomly sample 1,000 Verilog implementations and apply the test generation and refinement pipeline described in Section III. 
We then run corresponding unit tests against the original design and measure the proportion of the original designs that successfully pass the generated tests. As shown in Table V, only $24.4\%$ of the examples in the RTLCoder dataset [6] pass our functional tests, while OriGen [8] reaches $53.5\%$ . + +OriGen's higher pass rate aligns with its stronger code generation results in Table III, hinting at a positive link between dataset validity and downstream performance. These findings highlight the potential value of incorporating functional correctness validation into fine-tuning dataset curation for better RTL code generation. + +
Prior Datasets | # Sampled Examples | Test Passing (%)
RTLCoder [6] | 1000 | 24.4
OriGen [8] | 1000 | 53.5
+ +TABLE V: Test passing rates $(\%)$ of datasets released by prior work on a randomly sampled set of 1000 examples. + +# VI. DISCUSSION AND FUTURE WORK + +While VeriCoder, combining unit test generation with feedback-driven refinement, improves the functional correctness of generated RTL code, it does not fully guarantee correctness. Synthetic test cases may fail to capture all possible edge cases. To address this challenge, future work should explore integrating formal verification techniques into the dataset construction pipeline to rigorously ensure the correctness of the generated code. Recent advancements have demonstrated promising results in translating natural language instructions into formal specifications [16], [38], as well as enforcing formal constraints during LLM-based code generation [39]. + +Moreover, most existing approaches, including VeriCoder, focus on small-scale RTL generation. However, practical hardware development often involves large, repository-level codebases with intricate cross-file dependencies and requirements for long-range context [40]–[42]. Recent work has begun to address these challenges through techniques such as combining fine-tuning with retrieval-augmented RTL code generation [43], [44]. Extending VeriCoder's unit test generation and feedback-directed refinement components to the repository scale will enable LLMs to handle more real-world RTL tasks. + +Furthermore, reinforcement learning (RL) offers a powerful framework for further optimizing large language models' performance beyond what is achievable through supervised fine-tuning alone. Recent studies have demonstrated the effectiveness of RL in enhancing LLM-based code generation by incorporating diverse forms of feedback, such as test case outcomes, compiler diagnostics, and formal verification results [32], [45], [46]. 
Building on this progress, future work could investigate applying RL techniques to the VeriCoder dataset, using the accompanying test cases as a feedback signal to iteratively improve RTL code generation quality. + +# VII. CONCLUSION + +Recent advances in Large Language Models (LLMs) have opened new possibilities for Electronic Design Automation (EDA), particularly in RTL code generation. However, most existing datasets emphasize syntactic validity while overlooking functional correctness, which limits the effectiveness of finetuned models. We introduce VERICODER, a model fine-tuned on a dataset with 125,000 examples that is validated for functional correctness. This dataset is constructed using a feedback-directed refinement pipeline guided by a teacher LLM, which generates and iteratively updates both RTL designs and unit tests until the design passes simulation. The resulting dataset consists of functionally validated triples comprising a natural language specification, an RTL implementation, and a passing test. Fine-tuned on this dataset, VERICODER achieves state-of-the-art results on two established RTL benchmarks, + +yielding relative improvements of up to $71.7\%$ on VerilogEval and $27.4\%$ on RTLLM. An ablation study confirms the impact of functional validation on model performance, underscoring the importance of high-quality training data. Future work may explore formal verification and reinforcement learning to further advance AI-assisted hardware design. + +# ACKNOWLEDGMENT + +We thank Samantha Archer, Yao Hsiao, Mohammad Rahmani Fadiheh and Subhasish Mitra for their discussions. This work was partially supported by a Google Research Award. + +# REFERENCES + +[1] M. Liu, T.-D. Ene, R. Kirby, C. Cheng, N. Pinckney, R. Liang, J. Alben, H. Anand, S. Banerjee, I. Bayraktaroglu et al., "Chipnemo: Domain-adapted llms for chip design," arXiv preprint arXiv:2311.00176, 2023. +[2] L. Chen, Y. Chen, Z. Chu, W. Fang, T.-Y. Ho, R. Huang, Y. Huang, S. Khan, M. 
Li, X. Li et al., "The dawn of ai-native eda: Opportunities and challenges of large circuit models," arXiv preprint arXiv:2403.07257, 2024. +[3] R. Zhong, X. Du, S. Kai, Z. Tang, S. Xu, H.-L. Zhen, J. Hao, Q. Xu, M. Yuan, and J. Yan, "Llm4eda: Emerging progress in large language models for electronic design automation," arXiv preprint arXiv:2401.12224, 2023. +[4] Z. He and B. Yu, “Large language models for eda: Future or mirage?” in Proceedings of the 2024 International Symposium on Physical Design, 2024, pp. 65–66. +[5] X. Yao, Y. Wang, X. Li, Y. Lian, R. Chen, L. Chen, M. Yuan, H. Xu, and B. Yu, "Rtlwriter: Methodologies for large models aided rtl code optimization," in Proceedings of the 43rd IEEE/ACM International Conference on Computer-Aided Design, 2024, pp. 1-7. +[6] S. Liu, W. Fang, Y. Lu, J. Wang, Q. Zhang, H. Zhang, and Z. Xie, "Rtlcoder: Fully open-source and efficient ltm-assisted rtl code generation technique," IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, 2024. +[7] S. Liu, W. Fang, Y. Lu, Q. Zhang, H. Zhang, and Z. Xie, "Rtlcoder: Outperforming gpt-3.5 in design rtl generation with our open-source dataset and lightweight solution," in 2024 IEEE LLM Aided Design Workshop (LAD). IEEE, 2024, pp. 1-5. +[8] F. Cui, C. Yin, K. Zhou, Y. Xiao, G. Sun, Q. Xu, Q. Guo, D. Song, D. Lin, X. Zhang et al., "Origen: Enhancing rtl code generation with code-to-code augmentation and self-reflection," arXiv preprint arXiv:2407.16237, 2024. +[9] M. Liu, N. Pinckney, B. Khailany, and H. Ren, "Veriloggeval: Evaluating large language models for verilog code generation," in 2023 IEEE/ACM International Conference on Computer Aided Design (ICCAD). IEEE, 2023, pp. 1-8. +[10] Y. Lu, S. Liu, Q. Zhang, and Z. Xie, "Rtllm: An open-source benchmark for design rtl generation with large language model," in 2024 29th Asia and South Pacific Design Automation Conference (ASP-DAC). IEEE, 2024, pp. 722-727. +[11] Y. Tsai, M. Liu, and H. 
Ren, "Rtlfixer: Automatically fixing rtI syntax errors with large language model," in Proceedings of the 61st ACM/IEEE Design Automation Conference, 2024, pp. 1-6. +[12] Y. Liao, T. Adegbija, and R. Lysecky, "Are llms any good for high-level synthesis?" in Proceedings of the 43rd IEEE/ACM International Conference on Computer-Aided Design, 2024, pp. 1-8. +[13] Y. Fu, Y. Zhang, Z. Yu, S. Li, Z. Ye, C. Li, C. Wan, and Y. C. Lin, "Gpt4aigchip: Towards next-generation ai accelerator design automation via large language models," in 2023 IEEE/ACM International Conference on Computer Aided Design (ICCAD). IEEE, 2023, pp. 1-9. +[14] Z. Yan, Y. Qin, X. S. Hu, and Y. Shi, "On the viability of using llms for sw/hw co-design: An example in designing cim dnn accelerators," in 2023 IEEE 36th International System-on-Chip Conference (SOCC). IEEE, 2023, pp. 1-6. +[15] Z. Liang, J. Cheng, R. Yang, H. Ren, Z. Song, D. Wu, X. Qian, T. Li, and Y. Shi, "Unleashing the potential of llms for quantum computing: A study in quantum architecture design," arXiv preprint arXiv:2307.08191, 2023. + +[16] M. Cosler, C. Hahn, D. Mendoza, F. Schmitt, and C. Trippel, "nl2spec: Interactively translating unstructured natural language to temporal logics with large language models," in International Conference on Computer Aided Verification. Springer, 2023, pp. 383-396. +[17] C. Sun, C. Hahn, and C. Trippel, "Towards improving verification productivity with circuit-aware translation of natural language to systemverilog assertions," in First International Workshop on Deep Learning-aided Verification, 2023. +[18] H. Wu, Z. He, X. Zhang, X. Yao, S. Zheng, H. Zheng, and B. Yu, "Chateda: A large language model powered autonomous agent for eda," IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, 2024. +[19] Z. Xiao, X. He, H. Wu, B. Yu, and Y. Guo, "Eda-copilot: A ragpowered intelligent assistant for eda tools," ACM Transactions on Design Automation of Electronic Systems, 2025. 
+[20] K. Xu, J. Sun, Y. Hu, X. Fang, W. Shan, X. Wang, and Z. Jiang, "Meic: Re-thinking rtl debug automation using llms," in Proceedings of the 43rd IEEE/ACM International Conference on Computer-Aided Design, 2024, pp. 1-9. +[21] R. Li, L. B. Allal, Y. Zi, N. Muennighoff, D. Kocetkov, C. Mou, M. Marone, C. Akiki, J. Li, J. Chim et al., "Starcoder: may the source be with you!" arXiv preprint arXiv:2305.06161, 2023. +[22] A. Lozhkov, R. Li, L. B. Allal, F. Cassano, J. Lamy-Poirier, N. Tazi, A. Tang, D. Pykhtar, J. Liu, Y. Wei et al., "Starcoder 2 and the stack v2: The next generation," arXiv preprint arXiv:2402.19173, 2024. +[23] E. Dehaerne, B. Dey, S. Halder, and S. De Gendt, “A deep learning framework for verilog autocompletion towards design and verification automation,” arXiv preprint arXiv:2304.13840, 2023. +[24] Z. Pei, H.-L. Zhen, M. Yuan, Y. Huang, and B. Yu, "Betterv: Controlled verilog generation with discriminative guidance," arXiv preprint arXiv:2402.03375, 2024. +[25] S. Thakur, B. Ahmad, Z. Fan, H. Pearce, B. Tan, R. Karri, B. Dolan-Gavitt, and S. Garg, "Benchmarking large language models for automated verilog RTL code generation," in 2023 Design, Automation & Test in Europe Conference & Exhibition (DATE). IEEE, 2023, pp. 1-6. +[26] S. Thakur, B. Ahmad, H. Pearce, B. Tan, B. Dolan-Gavitt, R. Karri, and S. Garg, "Verigen: A large language model for verilog code generation," ACM Transactions on Design Automation of Electronic Systems, vol. 29, no. 3, pp. 1-31, 2024. +[27] K. Chang, K. Wang, N. Yang, Y. Wang, D. Jin, W. Zhu, Z. Chen, C. Li, H. Yan, Y. Zhou et al., "Data is all you need: Finetuning llms for chip design via an automated design-data augmentation framework," in Proceedings of the 61st ACM/IEEE Design Automation Conference, 2024, pp. 1-6. +[28] E. J. Hu, Y. Shen, P. Wallis, Z. Allen-Zhu, Y. Li, S. Wang, L. Wang, W. Chen et al., “Lora: Low-rank adaptation of large language models.” *ICLR*, vol. 1, no. 2, p. 3, 2022. +[29] M. Liu, Y.-D. Tsai, W. 
Zhou, and H. Ren, "Craftrtl: High-quality synthetic data generation for verilog code models with correct-by-construction non-textual representations and targeted code repair," ArXiv, vol. abs/2409.12993, 2024. [Online]. Available: https://api.semanticscholar.org/CorpusID:272770433 +[30] Y. Zhang, Z. Yu, Y. Fu, C. Wan, and Y. C. Lin, "Mg-verilog: Multi-grained dataset towards enhanced llm-assisted verilog generation," in 2024 IEEE LLM Aided Design Workshop (LAD). IEEE, 2024, pp. 1-5. +[31] E. Goh, M. Xiang, I. Wey, T. H. Teo et al., “From english to asic: Hardware implementation with large language model,” arXiv preprint arXiv:2403.07039, 2024. +[32] S. Liu, Y. Lu, W. Fang, M. Li, and Z. Xie, "Openllm-rtl: Open dataset and benchmark for llm-aided design rtl generation," in Proceedings of the 43rd IEEE/ACM International Conference on Computer-Aided Design, 2024, pp. 1-9. +[33] C. Deng, Y.-D. Tsai, G.-T. Liu, Z. Yu, and H. Ren, "Scalertl: Scaling llms with reasoning data and test-time compute for accurate rtl code generation," ArXiv, vol. abs/2506.05566, 2025. [Online]. Available: https://api.semanticscholar.org/CorpusID:279243692 +[34] Y. Liu, C. Xu, Y. Zhou, Z. Li, and Q. Xu, "Deeprtl: Bridging verilog understanding and generation with a unified representation model," ArXiv, vol. abs/2502.15832, 2025. [Online]. Available: https://api.semanticscholar.org/CorpusID:276574886 +[35] N. Wang, B. Yao, J. Zhou, X. Wang, Z. Jiang, and N. Guan, "Large language model for verilog generation with golden code feedback," ArXiv, vol. abs/2407.18271, 2024. [Online]. Available: https://api.semanticscholar.org/CorpusID:271516462 +[36] M. Gao, J. Zhao, Z. Lin, W. Ding, X. Hou, Y. Feng, C. Li, and M. Guo, "Autovcoder: A systematic framework for automated verilog code generation using llms," 2024 IEEE 42nd International Conference + +on Computer Design (ICCD), pp. 162-169, 2024. [Online]. Available: https://api.semanticscholar.org/CorpusID:271516210 +[37] Y. Zhao, D. Huang, C. Li, P. 
Jin, Z. Nan, T. Ma, L. Qi, Y. Pan, Z. Zhang, R. Zhang, X. Zhang, Z. Du, Q. Guo, X. Hu, and Y. Chen, "Codev: Empowering llms withhdl generation through multi-level summarization," 2024. [Online]. Available: https://api_semanticscholar.org/CorpusID:271212791 +[38] D. Mendoza, C. Hahn, and C. Trippel, "Translating natural language to temporal logics with large language models and model checkers," in 2024 Formal Methods in Computer-Aided Design (FMCAD), 2024, pp. 1-11. +[39] P. Aggarwal, B. Parno, and S. Welleck, "Alphaverus: Bootstrapping formally verified code generation through self-improving translation and treefinement," arXiv preprint arXiv:2412.06176, 2024. +[40] C. E. Jimenez, J. Yang, A. Wettig, S. Yao, K. Pei, O. Press, and K. Narasimhan, "Swe-bench: Can language models resolve real-world github issues?" arXiv preprint arXiv:2310.06770, 2023. +[41] T. Suresh, R. G. Reddy, Y. Xu, Z. Nussbaum, A. Mulyar, B. Duderstadt, and H. Ji, "Cornstack: High-quality contrastive data for better code retrieval and reranking," in The Thirteenth International Conference on Learning Representations, 2025. +[42] N. Jain, M. Shetty, T. Zhang, K. Han, K. Sen, and I. Stoica, “R2e: Turning any github repository into a programming agent environment,” in ICML, 2024. +[43] P. Wu, N. Guo, J. Lv, X. Xiao, and X. Ye, "RtlrepEncoder: Repository-level rtl code completion through the combination of fine-tuning and retrieval augmentation," arXiv preprint arXiv:2504.08862, 2025. +[44] Z. Li, C. Xu, Z. Shi, Z. Peng, Y. Liu, Y. Zhou, L. Zhou, C. Ma, J. Zhong, X. Wang et al., "Deepcircuits: A comprehensive repository-level dataset for rtl code understanding, generation, and ppa analysis," arXiv preprint arXiv:2502.18297, 2025. +[45] N. Wang, B. Yao, J. Zhou, X. Wang, Z. Jiang, and N. Guan, “Large language model for verilog generation with golden code feedback,” arXiv preprint arXiv:2407.18271, 2024. +[46] J. Wang, Z. Zhang, Y. He, Y. Song, T. Shi, Y. Li, H. Xu, K. Wu, G. Qian, Q. 
Chen et al., “Enhancing code llms with reinforcement learning in code generation,” arXiv preprint arXiv:2412.20367, 2024. \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15659/images/00fce26a709a57a9ebe243142fabd9e290335c60132974be06478f7338456cf5.jpg b/data/2025/2504_15xxx/2504.15659/images/00fce26a709a57a9ebe243142fabd9e290335c60132974be06478f7338456cf5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8bb6b41b40603c0b9ad5838d751f65ef2b2ada1f --- /dev/null +++ b/data/2025/2504_15xxx/2504.15659/images/00fce26a709a57a9ebe243142fabd9e290335c60132974be06478f7338456cf5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7df5eb0f1f5347434c8d7f56a5570c17bb3e32029956e621b2621087c429f637 +size 3828 diff --git a/data/2025/2504_15xxx/2504.15659/images/05504b8cfde9e73c710b4509186e765d80b7e2614e9c4341ab0ec20f2649812f.jpg b/data/2025/2504_15xxx/2504.15659/images/05504b8cfde9e73c710b4509186e765d80b7e2614e9c4341ab0ec20f2649812f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2d55a092ecd83d3567a4aade105ba5e39436fa85 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15659/images/05504b8cfde9e73c710b4509186e765d80b7e2614e9c4341ab0ec20f2649812f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e346d96e6aef8f8e3a64c08cdbf1e494b33cb25aea4804939ff600c8abf1ba0 +size 28553 diff --git a/data/2025/2504_15xxx/2504.15659/images/1c236d229de3d510d104e28dbad8d6ae03c635914ee6ce1bf2d818846dae0d09.jpg b/data/2025/2504_15xxx/2504.15659/images/1c236d229de3d510d104e28dbad8d6ae03c635914ee6ce1bf2d818846dae0d09.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5382c6ca944f92c44c7c5b5370d04b38eaab0957 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15659/images/1c236d229de3d510d104e28dbad8d6ae03c635914ee6ce1bf2d818846dae0d09.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac7780a632ccab07146b98fc327d376e5d035d1c22db1a19f697d5e8c2c9cfc5 +size 
160028 diff --git a/data/2025/2504_15xxx/2504.15659/images/36bafefd15fa00a56e86e549967356d7fadc2ba905d753cdbfae75fdbec5b723.jpg b/data/2025/2504_15xxx/2504.15659/images/36bafefd15fa00a56e86e549967356d7fadc2ba905d753cdbfae75fdbec5b723.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a61417323e97a533f8a9d7e0b9c3c5cd4e8c59dc --- /dev/null +++ b/data/2025/2504_15xxx/2504.15659/images/36bafefd15fa00a56e86e549967356d7fadc2ba905d753cdbfae75fdbec5b723.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c75fd556f95440db79066abc6c0809a9d2b9761574ae8798664c17f8b86e70b1 +size 29583 diff --git a/data/2025/2504_15xxx/2504.15659/images/4a5ec9cc51cb0015f2ab1c778411473f2c44ef14e45f1231bc3002fe848b6dfa.jpg b/data/2025/2504_15xxx/2504.15659/images/4a5ec9cc51cb0015f2ab1c778411473f2c44ef14e45f1231bc3002fe848b6dfa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a9d2db095a56f1d9a53c3dd5191bf6cae37cd7f --- /dev/null +++ b/data/2025/2504_15xxx/2504.15659/images/4a5ec9cc51cb0015f2ab1c778411473f2c44ef14e45f1231bc3002fe848b6dfa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82c9c1a9381350afa4fbe481dc60c3df1ac8700e8d6e1e45a796a13684a8107d +size 18508 diff --git a/data/2025/2504_15xxx/2504.15659/images/5c1e7c992b49adf8de5ea90499d94e6e076d00b68a9c604449489cfdfdbc9a13.jpg b/data/2025/2504_15xxx/2504.15659/images/5c1e7c992b49adf8de5ea90499d94e6e076d00b68a9c604449489cfdfdbc9a13.jpg new file mode 100644 index 0000000000000000000000000000000000000000..786a95695ab6208e09f142bf88fb14df6c4a64c6 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15659/images/5c1e7c992b49adf8de5ea90499d94e6e076d00b68a9c604449489cfdfdbc9a13.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77070676cf7054d95ec0a0d3af5cd2936df9fbae9c72780bb0cd40b2f733fe3d +size 64981 diff --git a/data/2025/2504_15xxx/2504.15659/images/5c4b5c5dd73b0cbcb62a0a21938683d163731bae2003182c9845173afbe9b6cb.jpg 
b/data/2025/2504_15xxx/2504.15659/images/5c4b5c5dd73b0cbcb62a0a21938683d163731bae2003182c9845173afbe9b6cb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..369c1fe907f3556b337bf18517a83ac96d9b860c --- /dev/null +++ b/data/2025/2504_15xxx/2504.15659/images/5c4b5c5dd73b0cbcb62a0a21938683d163731bae2003182c9845173afbe9b6cb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4359763affd3b71136a8741015b97fed02e64c4c6bab56f7bf0f45c6cb4a353f +size 21680 diff --git a/data/2025/2504_15xxx/2504.15659/images/78eb9c01e8aec3197f5771bc2618afcf24170b28ee21cbb3c8fa98cb91a33acc.jpg b/data/2025/2504_15xxx/2504.15659/images/78eb9c01e8aec3197f5771bc2618afcf24170b28ee21cbb3c8fa98cb91a33acc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..93c55f6cfbba81581764b3390168e26d5c92436b --- /dev/null +++ b/data/2025/2504_15xxx/2504.15659/images/78eb9c01e8aec3197f5771bc2618afcf24170b28ee21cbb3c8fa98cb91a33acc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15c91192c473d106798e0240eca46e3951c84de47836d65271dec6a1375f13ce +size 16886 diff --git a/data/2025/2504_15xxx/2504.15659/images/9e6ce1035e375309829d6f9583b1aeaed11f21f05777f87cc4761e227314122f.jpg b/data/2025/2504_15xxx/2504.15659/images/9e6ce1035e375309829d6f9583b1aeaed11f21f05777f87cc4761e227314122f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d65780ce8d46219f18909ef1f22fe4a42d327bf7 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15659/images/9e6ce1035e375309829d6f9583b1aeaed11f21f05777f87cc4761e227314122f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d492204d1f301e376a25b7291967b570c1eec05871a491c646563752a5baae4c +size 153831 diff --git a/data/2025/2504_15xxx/2504.15659/images/ca37e8cb215359fe9ab488e17022939fb18c1f5bc9ddee7a7e9156793fa8f592.jpg b/data/2025/2504_15xxx/2504.15659/images/ca37e8cb215359fe9ab488e17022939fb18c1f5bc9ddee7a7e9156793fa8f592.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..a103a3c1f7d321d94c79b727288fe789e7780ddc --- /dev/null +++ b/data/2025/2504_15xxx/2504.15659/images/ca37e8cb215359fe9ab488e17022939fb18c1f5bc9ddee7a7e9156793fa8f592.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ec95e2c6d6e02f4c92a09f1706e4ce60eba0f129787e4bf2c04e22486463f37 +size 18447 diff --git a/data/2025/2504_15xxx/2504.15659/images/efb2efdf41bf536b56a5cb752b5e9196ea03bb35d4a7d8a0f2ff0503a08de96e.jpg b/data/2025/2504_15xxx/2504.15659/images/efb2efdf41bf536b56a5cb752b5e9196ea03bb35d4a7d8a0f2ff0503a08de96e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c0a7eb6c408dd5833a22c2dbdd0df95504bdd793 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15659/images/efb2efdf41bf536b56a5cb752b5e9196ea03bb35d4a7d8a0f2ff0503a08de96e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:422e1c8e1b9bd5165e2bcf7e82fb1f8f403bb5d786c9545768c015457f28a193 +size 4617 diff --git a/data/2025/2504_15xxx/2504.15659/images/efc2cd0a1e4cff36abd7b2e83b3cb5a92c4d06bb61768480b1b42d6d3437d91b.jpg b/data/2025/2504_15xxx/2504.15659/images/efc2cd0a1e4cff36abd7b2e83b3cb5a92c4d06bb61768480b1b42d6d3437d91b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..41a7941e3fd395b6e768379229bba942ae8d9e98 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15659/images/efc2cd0a1e4cff36abd7b2e83b3cb5a92c4d06bb61768480b1b42d6d3437d91b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e63f5f80bf3c3c878a76d88f994ea3839efe802afedf07232e07e1f7ca05666 +size 5360 diff --git a/data/2025/2504_15xxx/2504.15659/layout.json b/data/2025/2504_15xxx/2504.15659/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..fd1c625f98cbbbf834138b24afa00ae0100c077b --- /dev/null +++ b/data/2025/2504_15xxx/2504.15659/layout.json @@ -0,0 +1,9540 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 61, + 56, + 549, + 103 + ], + "type": "title", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 61, + 56, + 549, + 103 + ], + "spans": [ + { + "bbox": [ + 61, + 56, + 549, + 103 + ], + "type": "text", + "content": "VeriCoder: Enhancing LLM-Based RTL Code Generation through Functional Correctness Validation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 129, + 113, + 481, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 113, + 481, + 140 + ], + "spans": [ + { + "bbox": [ + 129, + 113, + 481, + 140 + ], + "type": "text", + "content": "Anjiang Wei " + }, + { + "bbox": [ + 129, + 113, + 481, + 140 + ], + "type": "inline_equation", + "content": "①" + }, + { + "bbox": [ + 129, + 113, + 481, + 140 + ], + "type": "text", + "content": ", Huanmi Tan " + }, + { + "bbox": [ + 129, + 113, + 481, + 140 + ], + "type": "inline_equation", + "content": "①" + }, + { + "bbox": [ + 129, + 113, + 481, + 140 + ], + "type": "text", + "content": ", Tarun Suresh " + }, + { + "bbox": [ + 129, + 113, + 481, + 140 + ], + "type": "inline_equation", + "content": "①" + }, + { + "bbox": [ + 129, + 113, + 481, + 140 + ], + "type": "text", + "content": ", Daniel Mendoza " + }, + { + "bbox": [ + 129, + 113, + 481, + 140 + ], + "type": "inline_equation", + "content": "①" + }, + { + "bbox": [ + 129, + 113, + 481, + 140 + ], + "type": "text", + "content": ", Thiago S. F. X. 
Teixeira " + }, + { + "bbox": [ + 129, + 113, + 481, + 140 + ], + "type": "inline_equation", + "content": "①" + }, + { + "bbox": [ + 129, + 113, + 481, + 140 + ], + "type": "text", + "content": ", Ke Wang " + }, + { + "bbox": [ + 129, + 113, + 481, + 140 + ], + "type": "inline_equation", + "content": "①" + }, + { + "bbox": [ + 129, + 113, + 481, + 140 + ], + "type": "text", + "content": ", Caroline Trippel " + }, + { + "bbox": [ + 129, + 113, + 481, + 140 + ], + "type": "inline_equation", + "content": "①" + }, + { + "bbox": [ + 129, + 113, + 481, + 140 + ], + "type": "text", + "content": ", and Alex Aiken " + }, + { + "bbox": [ + 129, + 113, + 481, + 140 + ], + "type": "inline_equation", + "content": "①" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 175, + 301, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 175, + 301, + 456 + ], + "spans": [ + { + "bbox": [ + 45, + 175, + 301, + 456 + ], + "type": "text", + "content": "Abstract—Recent advances in Large Language Models (LLMs) have sparked growing interest in applying them to Electronic Design Automation (EDA) tasks, particularly Register Transfer Level (RTL) code generation. While several RTL datasets have been introduced, most focus on syntactic validity rather than functional validation with tests, leading to training examples that compile but may not implement the intended behavior. We present VERICODER, a model for RTL code generation fine-tuned on a dataset validated for functional correctness. This fine-tuning dataset is constructed using a novel methodology that combines unit test generation with feedback-directed refinement. Given a natural language specification and an initial RTL design, we prompt a teacher model (GPT-4o-mini) to generate unit tests and iteratively revise the RTL design based on its simulation results using the generated tests. If necessary, the teacher model also updates the tests to ensure they comply with the natural language specification. 
As a result of this process, every example in our dataset is functionally validated, consisting of a natural language description, an RTL implementation, and passing tests. Fine-tuned on this dataset of 125,777 examples, VERICODER achieves state-of-the-art metrics in functional correctness on VerilogEval and RTLLM, with relative gains of up to " + }, + { + "bbox": [ + 45, + 175, + 301, + 456 + ], + "type": "inline_equation", + "content": "71.7\\%" + }, + { + "bbox": [ + 45, + 175, + 301, + 456 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 175, + 301, + 456 + ], + "type": "inline_equation", + "content": "27.4\\%" + }, + { + "bbox": [ + 45, + 175, + 301, + 456 + ], + "type": "text", + "content": ", respectively. An ablation study further shows that models trained on our functionally validated dataset outperform those trained on functionally non-validated datasets, underscoring the importance of high-quality datasets in RTL code generation. Our code, data, and models are publicly available at https://github.com/Anjiang-Wei/VeriCoder" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 460, + 301, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 460, + 301, + 471 + ], + "spans": [ + { + "bbox": [ + 55, + 460, + 301, + 471 + ], + "type": "text", + "content": "Index Terms—RTL, Code Generation, Large Language Model." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 499, + 215, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 499, + 215, + 510 + ], + "spans": [ + { + "bbox": [ + 132, + 499, + 215, + 510 + ], + "type": "text", + "content": "I. 
INTRODUCTION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 515, + 301, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 515, + 301, + 647 + ], + "spans": [ + { + "bbox": [ + 45, + 515, + 301, + 647 + ], + "type": "text", + "content": "Large Language Models (LLMs) have demonstrated remarkable performance across natural language processing tasks, spurring growing interest in applying their capabilities to a broad range of Electronic Design Automation (EDA) problems [1]–[4]. Recent efforts explore LLMs for code generation [5]–[12], architecture design [13]–[15], verification [16], [17], tool assistance [18], [19], and debugging [1], [20]. In this work, we focus on generating Register Transfer Level (RTL) code from natural language specifications. Automating RTL code generation has the potential to significantly boost hardware design productivity and reduce the manual effort" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 657, + 301, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 657, + 301, + 685 + ], + "spans": [ + { + "bbox": [ + 45, + 657, + 301, + 685 + ], + "type": "text", + "content": "Anjiang Wei, Daniel Mendoza, Caroline Trippel, and Alex Aiken are affiliated with Stanford University (e-mail: anjiang@cs.stanford.edu; dmendo@stanford.edu; trippel@stanford.edu; aiken@cs.stanford.edu)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 685, + 301, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 685, + 301, + 702 + ], + "spans": [ + { + "bbox": [ + 45, + 685, + 301, + 702 + ], + "type": "text", + "content": "Huanmi Tan is affiliated with Carnegie Mellon University (e-mail: huanmi.tan@gmail.com)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 702, + 301, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 702, + 301, + 720 + ], + "spans": [ + { + "bbox": [ + 45, + 702, + 301, + 720 + ], + "type": "text", + "content": "Tarun Suresh is affiliated with University of Illinois Urbana-Champaign (e-mail: tsuresh3@illinois.edu)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 720, + 301, + 738 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 720, + 301, + 738 + ], + "spans": [ + { + "bbox": [ + 45, + 720, + 301, + 738 + ], + "type": "text", + "content": "Thiago S. F. X. Teixeira is with Intel Corporation (e-mail: thiago.teixeira@intel.com)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 738, + 266, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 738, + 266, + 748 + ], + "spans": [ + { + "bbox": [ + 53, + 738, + 266, + 748 + ], + "type": "text", + "content": "Ke Wang is with Nanjing University (e-mail: kwg@nju.edu.cn)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 174, + 564, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 174, + 564, + 198 + ], + "spans": [ + { + "bbox": [ + 307, + 174, + 564, + 198 + ], + "type": "text", + "content": "involved in complex design tasks, making it a timely and impactful area of research." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 198, + 565, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 198, + 565, + 376 + ], + "spans": [ + { + "bbox": [ + 307, + 198, + 565, + 376 + ], + "type": "text", + "content": "Developing open-source, lightweight models for RTL code generation is essential for advancing both research and deployment. Proprietary models such as GPT-4o and Claude 3.7 restrict customization and lack transparency, making them unsuitable for in-depth analysis and academic exploration. 
They also raise privacy and security concerns, especially when handling RTL designs that may contain sensitive intellectual property. In contrast, lightweight models that can run locally offer a secure, privacy-preserving alternative—enabling hardware engineers to integrate AI directly into their design workflows. However, existing open-source models still underperform on RTL tasks, largely due to the absence of high-quality, functionally validated RTL datasets in their training corpora [21], [22]. While training algorithms are readily available, progress is bottlenecked by the lack of open datasets with functional correctness validation." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 378, + 565, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 378, + 565, + 533 + ], + "spans": [ + { + "bbox": [ + 307, + 378, + 565, + 533 + ], + "type": "text", + "content": "A key challenge in building such datasets lies in constructing large-scale, high-quality training data that pairs natural language specifications with RTL implementations. Despite efforts to mine RTL code from open-source repositories [23]–[26], much of the collected data lacks validation and may not align with its intended functionality. To address this, recent work has turned to LLMs—either prompting them to synthesize RTL designs from keyword-based specifications [6], [7] or leveraging them to rewrite existing RTL code and generate matching specifications [8], [24], [26]. In both cases, syntax checkers are often employed to filter uncompilable code or provide feedback for iterative refinement, but these techniques still fall short of validating functional correctness." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 533, + 565, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 533, + 565, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 533, + 565, + 712 + ], + "type": "text", + "content": "As far as we know, all these prior work [6]–[8], [24], [26] have focused solely on ensuring syntactic correctness, overlooking functional correctness. As a result, many dataset examples compile successfully but may not implement the behavior described in their natural language specifications. The distinction between syntactic correctness and functional correctness has important implications for model evaluation and real-world deployment. While functionally correct code inherently satisfies syntax constraints, syntactic correctness alone does not guarantee correct functionality. This gap is evident in the results reported by the RTLLM benchmark [10], where GPT-4o attains a high syntax accuracy of " + }, + { + "bbox": [ + 307, + 533, + 565, + 712 + ], + "type": "inline_equation", + "content": "100.0\\%" + }, + { + "bbox": [ + 307, + 533, + 565, + 712 + ], + "type": "text", + "content": ", yet achieve only " + }, + { + "bbox": [ + 307, + 533, + 565, + 712 + ], + "type": "inline_equation", + "content": "69.0\\%" + }, + { + "bbox": [ + 307, + 533, + 565, + 712 + ], + "type": "text", + "content": " in terms of functional correctness. Ultimately, in real-world settings, it is functional correctness rather than syntactic validity that truly matters." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 712, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 712, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 712, + 564, + 748 + ], + "type": "text", + "content": "In this work, we introduce VeriCoder, a model for RTL code generation fine-tuned on a high-quality dataset consisting of 125,777 examples that has been validated for functional" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 14, + 217, + 36, + 574 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 217, + 36, + 574 + ], + "spans": [ + { + "bbox": [ + 14, + 217, + 36, + 574 + ], + "type": "text", + "content": "arXiv:2504.15659v2 [cs.AR] 24 Aug 2025" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 50, + 565, + 242 + ], + "blocks": [ + { + "bbox": [ + 47, + 50, + 565, + 242 + ], + "lines": [ + { + "bbox": [ + 47, + 50, + 565, + 242 + ], + "spans": [ + { + "bbox": [ + 47, + 50, + 565, + 242 + ], + "type": "table", + "html": "
Prior WorkStrategyDescriptionSyntax CheckerUnit Tests
RTLCoder [7]Keyword-based Generation, MutationPrompt LLM with keywords and existing code, followed by iterative mutation to get instruction-code pairs.X
OriGen [8]Code-to-Code, Syntax Error CorrectionApplies LLM-driven code-to-code pipeline on existing RTL code and filters them by compiler error feedback.X
BetterV [24]Web Scraping & Cleaning, Alignment with CLarge-scale web-collected Verilog, cleaned and filtered to enforce coding standards; aligns C with Verilog.X
VeriGen [26]Manually Collect Textbook and Open-Source CodeMines real-world RTL from GitHub and textbooks, manually cleans and organizes them into a structured dataset.X
ChipGPT [27]AST-based SynthesisConverts Verilog ASTs into natural-language prompts and injects semantic error variants via EDA-tool feedback.X
VeriCoder (Our Work)Feedback-Directed Refinement, Simulation, Unit Test GenerationIteratively generate unit tests with a teacher LLM, check implementations via compiler and simulator, and refining designs and tests until each design passes.
", + "image_path": "9e6ce1035e375309829d6f9583b1aeaed11f21f05777f87cc4761e227314122f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 143, + 246, + 465, + 258 + ], + "lines": [ + { + "bbox": [ + 143, + 246, + 465, + 258 + ], + "spans": [ + { + "bbox": [ + 143, + 246, + 465, + 258 + ], + "type": "text", + "content": "TABLE I: Comparison of Verilog fine-tuning dataset construction approaches." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 45, + 279, + 301, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 279, + 301, + 434 + ], + "spans": [ + { + "bbox": [ + 45, + 279, + 301, + 434 + ], + "type": "text", + "content": "correctness1. To construct this dataset, we develop a novel pipeline that combines unit test generation with feedback-directed refinement guided by a teacher LLM (GPT-4o-mini). Given a natural language specification and an initial RTL implementation, the teacher model first generates a unit test. If the RTL code fails the simulation, the model iteratively revises the design based on the observed error messages. When needed, the unit test is also updated to better reflect the intended functionality described by the specification. This process continues until the design passes simulation or a retry limit is reached. The resulting fine-tuning dataset consists of 125,777 validated triples: a natural language specification, a correct RTL design, and a self-checking unit test." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 435, + 301, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 435, + 301, + 506 + ], + "spans": [ + { + "bbox": [ + 45, + 435, + 301, + 506 + ], + "type": "text", + "content": "We fine-tune VeriCoder from Qwen2.5-14B-Instruct using our curated dataset and evaluate it on two established RTL code generation benchmarks: VerilogEval [9] and RTLLM [10]. 
VeriCoder achieves new state-of-the-art performance, achieving up to " + }, + { + "bbox": [ + 45, + 435, + 301, + 506 + ], + "type": "inline_equation", + "content": "71.7\\%" + }, + { + "bbox": [ + 45, + 435, + 301, + 506 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 435, + 301, + 506 + ], + "type": "inline_equation", + "content": "27.4\\%" + }, + { + "bbox": [ + 45, + 435, + 301, + 506 + ], + "type": "text", + "content": " relative gains in the pass@k metric over the previous best fine-tuned model OriGen [8]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 506, + 302, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 506, + 302, + 566 + ], + "spans": [ + { + "bbox": [ + 45, + 506, + 302, + 566 + ], + "type": "text", + "content": "We conduct an ablation study demonstrating that models trained on our functionally validated dataset outperform those trained on non-validated data, under the same base model and training setup. These results highlight the importance of high-quality, functionally validated datasets for RTL code generation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 567, + 194, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 567, + 194, + 578 + ], + "spans": [ + { + "bbox": [ + 55, + 567, + 194, + 578 + ], + "type": "text", + "content": "Our contributions are as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 579, + 301, + 711 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 56, + 579, + 301, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 579, + 301, + 651 + ], + "spans": [ + { + "bbox": [ + 56, + 579, + 301, + 651 + ], + "type": "text", + "content": "- We introduce VeriCoder, an RTL code generation model fine-tuned on a dataset validated for functional correctness. 
On the VerilogEval and RTLLM benchmarks, VeriCoder achieves state-of-the-art performance among open-source fine-tuned models, yielding relative pass@k gains of up to " + }, + { + "bbox": [ + 56, + 579, + 301, + 651 + ], + "type": "inline_equation", + "content": "71.7\\%" + }, + { + "bbox": [ + 56, + 579, + 301, + 651 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 56, + 579, + 301, + 651 + ], + "type": "inline_equation", + "content": "27.4\\%" + }, + { + "bbox": [ + 56, + 579, + 301, + 651 + ], + "type": "text", + "content": " over the prior best." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 651, + 301, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 651, + 301, + 711 + ], + "spans": [ + { + "bbox": [ + 56, + 651, + 301, + 711 + ], + "type": "text", + "content": "- We develop a dataset augmentation pipeline that combines unit test generation with feedback-directed refinement guided by a teacher LLM. This yields, to the best of our knowledge, the largest fine-tuning dataset to date with functional validation, consisting of 125,777 validated" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 45, + 719, + 301, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 719, + 301, + 749 + ], + "spans": [ + { + "bbox": [ + 45, + 719, + 301, + 749 + ], + "type": "text", + "content": "1While functional correctness is not fully guaranteed, we manually reviewed 100 randomly sampled examples and found that " + }, + { + "bbox": [ + 45, + 719, + 301, + 749 + ], + "type": "inline_equation", + "content": "92\\%" + }, + { + "bbox": [ + 45, + 719, + 301, + 749 + ], + "type": "text", + "content": " of the generated RTL code correctly matches the corresponding natural language descriptions." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 328, + 280, + 564, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 280, + 564, + 304 + ], + "spans": [ + { + "bbox": [ + 328, + 280, + 564, + 304 + ], + "type": "text", + "content": "triples of natural language specifications, RTL designs, and passing tests." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 320, + 304, + 564, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 304, + 564, + 363 + ], + "spans": [ + { + "bbox": [ + 320, + 304, + 564, + 363 + ], + "type": "text", + "content": "- We conduct an ablation study showing that functional validation during dataset construction improves model performance, underscoring the importance of using high-quality functionally validated datasets for RTL code generation." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 348, + 380, + 523, + 391 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 348, + 380, + 523, + 391 + ], + "spans": [ + { + "bbox": [ + 348, + 380, + 523, + 391 + ], + "type": "text", + "content": "II. BACKGROUND AND RELATED WORK" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 396, + 479, + 408 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 396, + 479, + 408 + ], + "spans": [ + { + "bbox": [ + 308, + 396, + 479, + 408 + ], + "type": "text", + "content": "A. Language Modeling and Fine-Tuning" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 411, + 564, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 411, + 564, + 471 + ], + "spans": [ + { + "bbox": [ + 307, + 411, + 564, + 471 + ], + "type": "text", + "content": "Large Language Models (LLMs) are deep neural networks trained to perform language modeling, a task where the model learns to predict the next token in a sequence. 
Formally, given a sequence of tokens " + }, + { + "bbox": [ + 307, + 411, + 564, + 471 + ], + "type": "inline_equation", + "content": "x = (x_{1}, x_{2}, \\ldots, x_{T})" + }, + { + "bbox": [ + 307, + 411, + 564, + 471 + ], + "type": "text", + "content": ", the training objective is to maximize the log-likelihood:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 373, + 479, + 564, + 512 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 479, + 564, + 512 + ], + "spans": [ + { + "bbox": [ + 373, + 479, + 564, + 512 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {L M}} = \\sum_ {t = 1} ^ {T} \\log P \\left(x _ {t} \\mid x _ {< t}; \\theta\\right), \\tag {1}", + "image_path": "efc2cd0a1e4cff36abd7b2e83b3cb5a92c4d06bb61768480b1b42d6d3437d91b.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 517, + 564, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 517, + 564, + 564 + ], + "spans": [ + { + "bbox": [ + 308, + 517, + 564, + 564 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 308, + 517, + 564, + 564 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 308, + 517, + 564, + 564 + ], + "type": "text", + "content": " denotes the model parameters and " + }, + { + "bbox": [ + 308, + 517, + 564, + 564 + ], + "type": "inline_equation", + "content": "x_{< t} = (x_1, \\ldots, x_{t-1})" + }, + { + "bbox": [ + 308, + 517, + 564, + 564 + ], + "type": "text", + "content": " represents the context tokens. This autoregressive objective enables the model to generate coherent text and capture long-range dependencies across various domains." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 318, + 564, + 564, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 564, + 564, + 577 + ], + "spans": [ + { + "bbox": [ + 318, + 564, + 564, + 577 + ], + "type": "text", + "content": "The training of LLMs is typically organized into two stages:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 319, + 578, + 563, + 674 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 319, + 578, + 563, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 578, + 563, + 626 + ], + "spans": [ + { + "bbox": [ + 319, + 578, + 563, + 626 + ], + "type": "text", + "content": "- Pre-training: The model is trained on massive, diverse corpora (e.g., web data, books, source code) to acquire broad knowledge and language understanding. This stage is expensive and performed once per model." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 319, + 627, + 563, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 627, + 563, + 674 + ], + "spans": [ + { + "bbox": [ + 319, + 627, + 563, + 674 + ], + "type": "text", + "content": "- Post-training: The pre-trained model is adapted to specific tasks using smaller, curated datasets. This stage includes supervised fine-tuning (SFT), where the model is trained on task-specific input-output pairs." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 307, + 677, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 677, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 307, + 677, + 564, + 749 + ], + "type": "text", + "content": "Since post-training large models from scratch is resource-intensive, researchers have developed parameter-efficient fin-tuning methods. One widely used approach is Low-Rank Adaptation (LoRA) [28]. 
Instead of updating the full weight matrices " + }, + { + "bbox": [ + 307, + 677, + 564, + 749 + ], + "type": "inline_equation", + "content": "W \\in \\mathbb{R}^{d \\times k}" + }, + { + "bbox": [ + 307, + 677, + 564, + 749 + ], + "type": "text", + "content": " in each linear layer, LoRA freezes the original weights and introduces a trainable low-rank update:" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 25, + 563, + 32 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 47, + 53, + 565, + 207 + ], + "blocks": [ + { + "bbox": [ + 47, + 53, + 565, + 207 + ], + "lines": [ + { + "bbox": [ + 47, + 53, + 565, + 207 + ], + "spans": [ + { + "bbox": [ + 47, + 53, + 565, + 207 + ], + "type": "image", + "image_path": "5c1e7c992b49adf8de5ea90499d94e6e076d00b68a9c604449489cfdfdbc9a13.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 195, + 212, + 412, + 224 + ], + "lines": [ + { + "bbox": [ + 195, + 212, + 412, + 224 + ], + "spans": [ + { + "bbox": [ + 195, + 212, + 412, + 224 + ], + "type": "text", + "content": "Fig. 1: LLM-guided dataset augmentation overview." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 47, + 238, + 229, + 370 + ], + "blocks": [ + { + "bbox": [ + 47, + 238, + 229, + 370 + ], + "lines": [ + { + "bbox": [ + 47, + 238, + 229, + 370 + ], + "spans": [ + { + "bbox": [ + 47, + 238, + 229, + 370 + ], + "type": "image", + "image_path": "36bafefd15fa00a56e86e549967356d7fadc2ba905d753cdbfae75fdbec5b723.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 380, + 230, + 403 + ], + "lines": [ + { + "bbox": [ + 45, + 380, + 230, + 403 + ], + "spans": [ + { + "bbox": [ + 45, + 380, + 230, + 403 + ], + "type": "text", + "content": "(a) Natural language specification taken from the Origen [8] dataset." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 248, + 237, + 392, + 373 + ], + "blocks": [ + { + "bbox": [ + 248, + 237, + 392, + 373 + ], + "lines": [ + { + "bbox": [ + 248, + 237, + 392, + 373 + ], + "spans": [ + { + "bbox": [ + 248, + 237, + 392, + 373 + ], + "type": "image", + "image_path": "ca37e8cb215359fe9ab488e17022939fb18c1f5bc9ddee7a7e9156793fa8f592.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 246, + 381, + 390, + 413 + ], + "lines": [ + { + "bbox": [ + 246, + 381, + 390, + 413 + ], + "spans": [ + { + "bbox": [ + 246, + 381, + 390, + 413 + ], + "type": "text", + "content": "(b) Buggy design taken from the Origen [8] dataset. It times out on the generated test shown in Figure 3." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 44, + 418, + 565, + 455 + ], + "lines": [ + { + "bbox": [ + 44, + 418, + 565, + 455 + ], + "spans": [ + { + "bbox": [ + 44, + 418, + 565, + 455 + ], + "type": "text", + "content": "Fig. 
2: Natural language specification (left) and the corresponding buggy and corrected Verilog designs (middle and right). The specification and buggy design are from the original dataset [8], which lacks tests, while the test (Figure 3) and corrected design are generated by a teacher model (GPT-4o-mini) and included in our validated dataset." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 414, + 237, + 560, + 373 + ], + "blocks": [ + { + "bbox": [ + 414, + 237, + 560, + 373 + ], + "lines": [ + { + "bbox": [ + 414, + 237, + 560, + 373 + ], + "spans": [ + { + "bbox": [ + 414, + 237, + 560, + 373 + ], + "type": "image", + "image_path": "4a5ec9cc51cb0015f2ab1c778411473f2c44ef14e45f1231bc3002fe848b6dfa.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 412, + 381, + 558, + 413 + ], + "lines": [ + { + "bbox": [ + 412, + 381, + 558, + 413 + ], + "spans": [ + { + "bbox": [ + 412, + 381, + 558, + 413 + ], + "type": "text", + "content": "(c) Correct design fixed by the teacher model that passes the generated test in Figure 3." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 110, + 489, + 301, + 502 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 489, + 301, + 502 + ], + "spans": [ + { + "bbox": [ + 110, + 489, + 301, + 502 + ], + "type": "interline_equation", + "content": "W ^ {\\prime} = W + \\Delta W = W + A B, \\tag {2}", + "image_path": "00fce26a709a57a9ebe243142fabd9e290335c60132974be06478f7338456cf5.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 45, + 509, + 301, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 509, + 301, + 581 + ], + "spans": [ + { + "bbox": [ + 45, + 509, + 301, + 581 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 45, + 509, + 301, + 581 + ], + "type": "inline_equation", + "content": "A \\in \\mathbb{R}^{d \\times r}" + }, + { + "bbox": [ + 45, + 509, + 301, + 581 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 509, + 301, + 581 + ], + "type": "inline_equation", + "content": "B \\in \\mathbb{R}^{r \\times k}" + }, + { + "bbox": [ + 45, + 509, + 301, + 581 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 45, + 509, + 301, + 581 + ], + "type": "inline_equation", + "content": "r \\ll \\min(d, k)" + }, + { + "bbox": [ + 45, + 509, + 301, + 581 + ], + "type": "text", + "content": ". 
Only " + }, + { + "bbox": [ + 45, + 509, + 301, + 581 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 45, + 509, + 301, + 581 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 509, + 301, + 581 + ], + "type": "inline_equation", + "content": "B" + }, + { + "bbox": [ + 45, + 509, + 301, + 581 + ], + "type": "text", + "content": " are updated during training, while " + }, + { + "bbox": [ + 45, + 509, + 301, + 581 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 45, + 509, + 301, + 581 + ], + "type": "text", + "content": " remains unchanged. This technique reduces both memory and compute overhead during adaptation, making it feasible to specialize large LLMs to domain-specific applications, such as RTL generation, with limited computational resources." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 600, + 225, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 600, + 225, + 611 + ], + "spans": [ + { + "bbox": [ + 45, + 600, + 225, + 611 + ], + "type": "text", + "content": "B. Related Work on RTL Code Generation" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 45, + 616, + 300, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 616, + 300, + 688 + ], + "spans": [ + { + "bbox": [ + 45, + 616, + 300, + 688 + ], + "type": "text", + "content": "Progress on open-source RTL code generation is limited by the absence of large-scale, high-quality datasets. To mitigate this, recent efforts have focused on automated data mining and augmentation techniques to enrich existing corpora of RTL examples. Table I presents the comparison of different strategies for constructing fine-tuning datasets." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 45, + 689, + 301, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 689, + 301, + 749 + ], + "spans": [ + { + "bbox": [ + 45, + 689, + 301, + 749 + ], + "type": "text", + "content": "Mining open-source RTL designs is a common strategy for dataset construction. VeriGen [26] compiles Verilog modules from GitHub and textbooks into a structured corpus using automated syntax checks. BetterV [24] collects Verilog modules from the internet and then filters designs based on coding style" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 475, + 564, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 475, + 564, + 536 + ], + "spans": [ + { + "bbox": [ + 307, + 475, + 564, + 536 + ], + "type": "text", + "content": "and syntactic validity. CraftRTL [29] augments fine-tuning data with non-textual code representations, injecting synthetic errors derived from intermediate model checkpoints into open-source Verilog code. Other works [8], [30], [31] adopt similar methodologies for sourcing and preprocessing RTL code." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 539, + 564, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 539, + 564, + 658 + ], + "spans": [ + { + "bbox": [ + 307, + 539, + 564, + 658 + ], + "type": "text", + "content": "Another line of work leverages a commercial LLM for synthetic data generation. RTLCoder [6] prompts GPT-3.5 with domain keywords to generate both task descriptions and corresponding RTL, discarding any outputs that fail to compile. OriGen [8] further employs Claude 3.5 in a two-stage code-to-code pipeline: first turning mined RTL code into natural language specifications, then regenerating code from these specifications under compiler guidance, combining the strengths of real-world examples and synthetic generation. 
ChipGPT [27] transforms Verilog ASTs into natural language specifications." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 661, + 564, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 661, + 564, + 734 + ], + "spans": [ + { + "bbox": [ + 307, + 661, + 564, + 734 + ], + "type": "text", + "content": "While most of the existing work listed in Table I ensures syntax validity, none of them has any evidence of functional correctness. Without comprehensive unit tests or simulation-based feedback during dataset construction, models fine-tuned on these corpora may produce code that compiles but still fails to meet the intended natural language specification." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 318, + 736, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 736, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 318, + 736, + 564, + 749 + ], + "type": "text", + "content": "A recent work, OpenLLM-RTL [32], explores the idea of" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 55, + 55, + 294, + 278 + ], + "blocks": [ + { + "bbox": [ + 55, + 55, + 294, + 278 + ], + "lines": [ + { + "bbox": [ + 55, + 55, + 294, + 278 + ], + "spans": [ + { + "bbox": [ + 55, + 55, + 294, + 278 + ], + "type": "text", + "content": "`timescale 1ns/1ps\nmodule tb_and3;\nreg a = 0, b = 0, c = 0;\nwire y;\n// Instantiate the DUT (Design Under Test)\nand3 uut (.a(a), .b(b), .c(c), .y(y));\ninitial begin\n// Wait for signals to settle\n#1;\n// Set all inputs to 1; expected y = 1\n{a, b, c} = 3'b111;\n#1;\n// 
Check output, report error if incorrect\nif (y != 1'B1)\n$fatal(1, \"FAIL: y=%b (expected 1)\", y);\n$display(\"PASS\");\n$finish;\nend\nendmodule" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "verilog" + }, + { + "bbox": [ + 45, + 289, + 301, + 361 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 289, + 301, + 361 + ], + "spans": [ + { + "bbox": [ + 45, + 289, + 301, + 361 + ], + "type": "text", + "content": "Fig. 3: Unit test for the and3 module. The buggy design (Figure 2b) times out on this test, while the corrected design (Figure 2c) passes successfully. The test is generated by the teacher model GPT-4o-mini using the prompt in Figure 4a, and is used to validate and augment the original dataset, which contains no tests." + } + ] + } + ], + "index": 2, + "type": "text" + }, + { + "bbox": [ + 45, + 382, + 301, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 382, + 301, + 465 + ], + "spans": [ + { + "bbox": [ + 45, + 382, + 301, + 465 + ], + "type": "text", + "content": "using LLMs to generate assertions, producing a functionally verified dataset of 7k examples. While sharing the same goal of improving functional correctness in fine-tuning datasets, our work takes a different approach by generating unit tests for validation. Our final dataset contains over 125,777 examples—the largest functionally validated RTL dataset to date." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 466, + 301, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 466, + 301, + 658 + ], + "spans": [ + { + "bbox": [ + 45, + 466, + 301, + 658 + ], + "type": "text", + "content": "Beyond data collection and synthesis techniques, several works explore other methods to enhance RTL code generation quality. 
ScaleRTL [33] emphasizes reasoning by generating intermediate traces and leveraging test-time compute through iterative self-reflection. DeepRTL [34] adopts curriculum learning guided by multi-level natural language summaries. VeriSeek [35] applies reinforcement learning with feedback derived from AST-level similarity between LLM outputs and reference designs. AutoVCoder [36] incorporates retrieval-augmented generation (RAG), dynamically supplying relevant Verilog snippets to the model. CodeV [37] extends generation capabilities to tasks such as fill-in-the-middle (FIM). Our work adopts standard supervised fine-tuning while focusing on constructing a large-scale, functionally validated dataset. Our approach is complementary and orthogonal to existing techniques." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 127, + 670, + 219, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 670, + 219, + 681 + ], + "spans": [ + { + "bbox": [ + 127, + 670, + 219, + 681 + ], + "type": "text", + "content": "III. METHODOLOGY" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 685, + 101, + 696 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 685, + 101, + 696 + ], + "spans": [ + { + "bbox": [ + 45, + 685, + 101, + 696 + ], + "type": "text", + "content": "A. Overview" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 700, + 301, + 750 + ], + "type": "text", + "content": "We aim to improve the quality of fine-tuning datasets consisting of natural language specifications paired with syntactically correct Verilog designs, as seen in prior work [6]–[8], [24], [26]. 
These datasets, including Origen [8], contain" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 54, + 564, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 54, + 564, + 222 + ], + "spans": [ + { + "bbox": [ + 307, + 54, + 564, + 222 + ], + "type": "text", + "content": "Verilog designs that pass syntax checks but are not validated against unit tests to ensure functional correctness. To address this limitation, we introduce an automated dataset augmentation pipeline that leverages a teacher language model, e.g., GPT-40-mini, to validate each example through iterative refinement. As illustrated in Figure 1, given a natural language specification and an initial RTL design, the teacher model first generates a unit test. If the RTL design fails the simulation, the model iteratively revises the design based on the error message. When needed, it also updates the unit test to better align with the natural language specification. Although our experiments focus on augmenting the Origen dataset due to its size and quality, the proposed methodology is broadly applicable to any dataset lacking test validation." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 308, + 222, + 564, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 222, + 564, + 329 + ], + "spans": [ + { + "bbox": [ + 308, + 222, + 564, + 329 + ], + "type": "text", + "content": "The pipeline begins with the original dataset " + }, + { + "bbox": [ + 308, + 222, + 564, + 329 + ], + "type": "inline_equation", + "content": "D = \\{(\\text{specification}, \\text{design})\\}" + }, + { + "bbox": [ + 308, + 222, + 564, + 329 + ], + "type": "text", + "content": ", where each RTL design is intended to implement a corresponding natural language specification. However, because no tests are provided, there is no evidence that the designs exhibit the intended functional behavior. 
For each pair, we prompt the teacher model, GPT-4o-mini, to generate a unit test for the design. The test is compiled and simulated with the design to check for correctness, where correctness means the design passes the simulation test." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 329, + 564, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 329, + 564, + 435 + ], + "spans": [ + { + "bbox": [ + 307, + 329, + 564, + 435 + ], + "type": "text", + "content": "If the simulation fails, we extract the resulting error message and re-invoke the teacher model using a refinement prompt. This prompt includes the specification, the current design and test, and the error message. The model attempts to resolve the failure by making minimal modifications to the design, the test, or both. This refinement process repeats iteratively: each candidate is re-simulated, and the cycle continues until the design passes the test or a maximum number of attempts is reached." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 308, + 436, + 565, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 436, + 565, + 508 + ], + "spans": [ + { + "bbox": [ + 308, + 436, + 565, + 508 + ], + "type": "text", + "content": "The final output is a validated dataset " + }, + { + "bbox": [ + 308, + 436, + 565, + 508 + ], + "type": "inline_equation", + "content": "D' = \\{(\\text{specification}, \\text{design}, \\text{test})\\}" + }, + { + "bbox": [ + 308, + 436, + 565, + 508 + ], + "type": "text", + "content": ", where each triplet contains a natural language specification, a Verilog design, and unit tests. A concrete motivating example is shown in Section III-B, and the details of the algorithm and prompts are provided in Section III-C." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 309, + 519, + 408, + 531 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 519, + 408, + 531 + ], + "spans": [ + { + "bbox": [ + 309, + 519, + 408, + 531 + ], + "type": "text", + "content": "B. Motivating Example" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 533, + 564, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 533, + 564, + 652 + ], + "spans": [ + { + "bbox": [ + 307, + 533, + 564, + 652 + ], + "type": "text", + "content": "Figure 2 presents a motivating example taken directly from the Origen dataset [8], highlighting a key limitation of datasets that rely only on syntax checks for validation. Prior work in RTL generation typically assumes that syntactic correctness is sufficient for fine-tuning, without verifying functionality through unit tests. This example demonstrates that a design can compile without errors yet fail to implement the intended behavior. It also illustrates how our method can automatically detect and correct such issues through test generation and iterative refinement." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 653, + 564, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 653, + 564, + 723 + ], + "spans": [ + { + "bbox": [ + 307, + 653, + 564, + 723 + ], + "type": "text", + "content": "This example includes a natural language specification (Figure 2a), a buggy RTL design from the original dataset (Figure 2b), and a corrected design produced by our pipeline (Figure 2c). The specification describes a simple combinational module, and3, which computes the bitwise AND of three one-bit inputs: a, b, and c." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 724, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 724, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 308, + 724, + 564, + 748 + ], + "type": "text", + "content": "The original design, though syntactically valid, is functionally incorrect due to several semantic issues. First, it misuses non-" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 25, + 563, + 31 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 25, + 563, + 31 + ], + "spans": [ + { + "bbox": [ + 558, + 25, + 563, + 31 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 57, + 134, + 69 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 57, + 134, + 69 + ], + "spans": [ + { + "bbox": [ + 56, + 57, + 134, + 69 + ], + "type": "text", + "content": "Prompt Template" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 74, + 288, + 112 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 74, + 288, + 112 + ], + "spans": [ + { + "bbox": [ + 55, + 74, + 288, + 112 + ], + "type": "text", + "content": "System Prompt You are a Verilog design and testing expert. Given a hardware specification described in natural language, your job is to generate both a correct Verilog module and a corresponding unit test that checks its functionality through simulation." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 117, + 104, + 125 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 117, + 104, + 125 + ], + "spans": [ + { + "bbox": [ + 56, + 117, + 104, + 125 + ], + "type": "text", + "content": "User Prompt" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 126, + 211, + 152 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 56, + 126, + 211, + 135 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 126, + 211, + 135 + ], + "spans": [ + { + "bbox": [ + 56, + 126, + 211, + 135 + ], + "type": "text", + "content": "- Natural Language Specification: {NL Spec}" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 136, + 173, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 136, + 173, + 144 + ], + "spans": [ + { + "bbox": [ + 56, + 136, + 173, + 144 + ], + "type": "text", + "content": "- Initial Implementation: {design}" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 145, + 99, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 145, + 99, + 152 + ], + "spans": [ + { + "bbox": [ + 56, + 145, + 99, + 152 + ], + "type": "text", + "content": "- Your task:" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 63, + 153, + 287, + 217 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 63, + 153, + 216, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 153, + 216, + 162 + ], + "spans": [ + { + "bbox": [ + 63, + 153, + 216, + 162 + ], + "type": "text", + "content": "1) Provide the unit tests for the given design." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 63, + 163, + 287, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 163, + 287, + 179 + ], + "spans": [ + { + "bbox": [ + 63, + 163, + 287, + 179 + ], + "type": "text", + "content": "2) Revise the Verilog implementation if the original design fails to pass your test cases." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 63, + 180, + 287, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 180, + 287, + 198 + ], + "spans": [ + { + "bbox": [ + 63, + 180, + 287, + 198 + ], + "type": "text", + "content": "3) Follow good coding practices, such as using meaningful comments to document key logic and decision points." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 63, + 198, + 242, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 198, + 242, + 206 + ], + "spans": [ + { + "bbox": [ + 63, + 198, + 242, + 206 + ], + "type": "text", + "content": "4) Use $fatal(1, \"msg\") to flag incorrect behavior." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 63, + 207, + 255, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 207, + 255, + 217 + ], + "spans": [ + { + "bbox": [ + 63, + 207, + 255, + 217 + ], + "type": "text", + "content": "5) Output format: {\"design\": \"...\", \"test\": \"...\"}" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 51, + 232, + 291, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 232, + 291, + 243 + ], + "spans": [ + { + "bbox": [ + 51, + 232, + 291, + 243 + ], + "type": "text", + "content": "(a) Prompt for generating a Verilog module's corresponding test" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 323, + 57, + 402, + 68 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 57, + 402, + 68 + ], + "spans": [ + { + "bbox": [ + 323, + 57, + 402, + 68 + ], + "type": "text", + "content": "Prompt Template" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 322, + 74, + 555, + 112 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 322, + 74, + 555, + 112 + ], + "spans": [ + { + "bbox": [ + 322, + 74, + 555, + 112 + ], + "type": "text", + "content": "System Prompt You are a Verilog design and testing expert. Analyze a failing design and its test, and make minimal yet sufficient edits to correct the issue while preserving the intended behavior specified in natural language." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 323, + 117, + 370, + 125 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 117, + 370, + 125 + ], + "spans": [ + { + "bbox": [ + 323, + 117, + 370, + 125 + ], + "type": "text", + "content": "User Prompt" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 324, + 126, + 479, + 161 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 324, + 126, + 479, + 135 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 126, + 479, + 135 + ], + "spans": [ + { + "bbox": [ + 324, + 126, + 479, + 135 + ], + "type": "text", + "content": "- Natural Language Specification: {NL Spec}" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 324, + 136, + 474, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 136, + 474, + 144 + ], + "spans": [ + { + "bbox": [ + 324, + 136, + 474, + 144 + ], + "type": "text", + "content": "- Previous Design and Test: {design}, {test}" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 324, + 145, + 451, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 145, + 451, + 153 + ], + "spans": [ + { + "bbox": [ + 324, + 145, + 451, + 153 + ], + "type": "text", + "content": "- Simulation Output: {error message}" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 324, + 154, + 367, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 154, + 367, + 161 + ], + "spans": [ + { + "bbox": [ + 324, + 154, + 367, + 161 + ], + "type": "text", + "content": "- Your task:" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 330, + 162, + 555, + 216 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 331, + 162, + 554, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 162, + 554, + 179 + ], + "spans": [ + { + "bbox": [ + 331, + 162, + 554, + 179 + ], + 
"type": "text", + "content": "1) Carefully identify the root cause of the failure by analyzing the code and the error message." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 330, + 180, + 554, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 180, + 554, + 198 + ], + "spans": [ + { + "bbox": [ + 330, + 180, + 554, + 198 + ], + "type": "text", + "content": "2) Make changes to either the design or the test (or both) to resolve the issue while maintaining correctness." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 330, + 198, + 555, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 330, + 198, + 555, + 216 + ], + "spans": [ + { + "bbox": [ + 330, + 198, + 555, + 216 + ], + "type": "text", + "content": "3) Output format: {\"explanation\": \"...\", \"design\": \"...\", \"test\": \"...\"}" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 334, + 232, + 543, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 334, + 232, + 543, + 243 + ], + "spans": [ + { + "bbox": [ + 334, + 232, + 543, + 243 + ], + "type": "text", + "content": "(b) Prompt for refining a failing Verilog design and test" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 45, + 249, + 563, + 274 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 249, + 563, + 274 + ], + "spans": [ + { + "bbox": [ + 45, + 249, + 563, + 274 + ], + "type": "text", + "content": "Fig. 4: Prompt templates provided to the teacher model for automated Verilog test generation and refinement, ensuring that the final design passes the generated test and matches the original natural language specification." 
+ } + ] + } + ], + "index": 28, + "type": "text" + }, + { + "bbox": [ + 45, + 295, + 301, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 295, + 301, + 437 + ], + "spans": [ + { + "bbox": [ + 45, + 295, + 301, + 437 + ], + "type": "text", + "content": "blocking assignments " + }, + { + "bbox": [ + 45, + 295, + 301, + 437 + ], + "type": "inline_equation", + "content": "(<=)" + }, + { + "bbox": [ + 45, + 295, + 301, + 437 + ], + "type": "text", + "content": " inside a combinational always @\\* block, which can lead to counterintuitive synthesis results. Second, if instead used inside a sequential block, the sequence of non-blocking assignments in the design-y <= a, then y <= y & c, and finally y <= y & b—does not correctly compute and store in y the bitwise AND of a, b, and c. In particular, non-blocking assignments defer updates until the end of the current timestep, meaning that all assignments operate on the same initial value of y, and only the final assignment takes effect. Finally, if the non-blocking assignments were replaced with blocking ones, the code would introduce a combinational feedback loop, which cannot stabilize." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 45, + 438, + 301, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 438, + 301, + 509 + ], + "spans": [ + { + "bbox": [ + 45, + 438, + 301, + 509 + ], + "type": "text", + "content": "These types of errors occur because the RTL code in prior datasets, including Origen [8], is synthetically generated by teacher LLMs such as Claude 3.5 and filtered only through syntax checks. Without simulation or test-based validation, semantic bugs that affect functional correctness remain undetected." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 45, + 510, + 301, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 510, + 301, + 724 + ], + "spans": [ + { + "bbox": [ + 45, + 510, + 301, + 724 + ], + "type": "text", + "content": "We provide the natural language specification and the buggy RTL design to the teacher model GPT-4o-mini, prompting it to generate a unit test using the template shown in Figure 4a (further detailed in Section III-C). The resulting test is shown in Figure 3, which sets all three inputs to 1 and checks whether the output y evaluates to 1 as expected. When the buggy design (Figure 2b) is simulated with this test, it hangs and ultimately times out. The bug exemplifies a combinational loop. The always @* block is meant for combinational logic and its evaluation is triggered upon changes to any of the variables read inside the block. In this case, an evaluation of the block is triggered when either y, a, b, or c changes. However, y is both read (on the RHS) and written (on the LHS) in the same block. Upon evaluating the block, it schedules an update to y, which causes a change to y. This change retriggers the block, leading to another scheduled update to y, and so on. This loop continues indefinitely, preventing the simulation from converging." 
+ } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "spans": [ + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "type": "text", + "content": "The corrected version replaces the non-blocking assignments with a single blocking assignment " + }, + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "type": "inline_equation", + "content": "(=)" + }, + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "type": "text", + "content": ", ensuring that " + }, + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 45, + 724, + 301, + 749 + ], + "type": "text", + "content": " is updated" + } + ] + } + ], + "index": 32 + }, + { + "type": "code", + "bbox": [ + 310, + 308, + 564, + 555 + ], + "blocks": [ + { + "bbox": [ + 309, + 292, + 547, + 304 + ], + "lines": [ + { + "bbox": [ + 309, + 292, + 547, + 304 + ], + "spans": [ + { + "bbox": [ + 309, + 292, + 547, + 304 + ], + "type": "text", + "content": "Algorithm 1 Dataset Augmentation with a Teacher LLM" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "lines": [ + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "spans": [ + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "text", + "content": "Input: Original dataset " + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "D = \\{(s_i,d_i)\\}_{i = 1}^N" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "\\triangleright s_i" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "text", + "content": " : NL specification; " + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "d_{i}" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "text", + "content": " : RTL design Maximum 
attempts " + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "text", + "content": " \nDefine: GenTestTpl " + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "text", + "content": " prompt template for test generation RefineTpl " + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "text", + "content": " prompt template for iterative refinement \nOutput: Augmented dataset " + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "D^{\\prime} = \\{(s_{i},d_{i},t_{i})\\}_{i = 1}^{M}" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "\\triangleright t_i" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "text", + "content": " : Generated unit test \n1: " + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "D^{\\prime}\\gets \\emptyset" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "text", + "content": " \n2: for each " + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "(s,d)\\in D" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "text", + "content": " do \n3: attempt " + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "\\leftarrow 0" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "text", + "content": " success " + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "text", + "content": " false \n4: while attempt " + }, + { + "bbox": [ 
+ 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "< T" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "text", + "content": " ∧ ¬success do \n5: attempt " + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "text", + "content": " attempt + 1 \n6: if attempt " + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "= = 1" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "text", + "content": " then \n7: d,t " + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "text", + "content": " LLMInvoke(GenTestTpl,s,d) \n8: else \n9: d,t " + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "text", + "content": " LLMInvoke(RefineTpl,s,d,t,err) \n10: success, err " + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "\\leftarrow" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "text", + "content": " RunVerilogTest(d,t) \n11: if success then \n12: " + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "D^{\\prime}\\gets D^{\\prime}\\cup \\{(s,d,t)\\}" + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "text", + "content": " \n13: return " + }, + { + "bbox": [ + 310, + 308, + 564, + 555 + ], + "type": "inline_equation", + "content": "D^{\\prime}" + } + ] + } + ], + "index": 34, + "angle": 0, + "type": "code_body" + } + ], + "index": 34, + "sub_type": "algorithm" + }, + { + "bbox": [ + 308, + 575, + 563, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 575, + 563, + 611 + ], + "spans": [ + { + 
"bbox": [ + 308, + 575, + 563, + 611 + ], + "type": "text", + "content": "immediately with the result of a & b & c, as required by the specification. This version passes the test generated by the teacher model and behaves correctly under simulation." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 307, + 612, + 564, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 612, + 564, + 683 + ], + "spans": [ + { + "bbox": [ + 307, + 612, + 564, + 683 + ], + "type": "text", + "content": "This example underscores the importance of functional validation in RTL datasets. Syntax checks alone cannot catch subtle but critical semantic errors. Our methodology, through teacher-driven test generation and iterative refinement, ensures that each design in the augmented dataset is not only syntactically valid but also functionally validated with unit tests." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 309, + 697, + 422, + 709 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 697, + 422, + 709 + ], + "spans": [ + { + "bbox": [ + 309, + 697, + 422, + 709 + ], + "type": "text", + "content": "C. Algorithm and Prompts" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 308, + 712, + 564, + 749 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 712, + 564, + 749 + ], + "spans": [ + { + "bbox": [ + 308, + 712, + 564, + 749 + ], + "type": "text", + "content": "Algorithm 1 presents our automated pipeline for transforming an unvalidated RTL dataset into a functionally validated one. 
Starting from a dataset " + }, + { + "bbox": [ + 308, + 712, + 564, + 749 + ], + "type": "inline_equation", + "content": "D = \\{(s_i, d_i)\\}_{i=1}^N" + }, + { + "bbox": [ + 308, + 712, + 564, + 749 + ], + "type": "text", + "content": ", where each" + } + ] + } + ], + "index": 38 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 32 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 55, + 301, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 138 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 138 + ], + "type": "text", + "content": "example consists of a natural language specification " + }, + { + "bbox": [ + 45, + 55, + 301, + 138 + ], + "type": "inline_equation", + "content": "s_i" + }, + { + "bbox": [ + 45, + 55, + 301, + 138 + ], + "type": "text", + "content": " and a corresponding RTL design " + }, + { + "bbox": [ + 45, + 55, + 301, + 138 + ], + "type": "inline_equation", + "content": "d_i" + }, + { + "bbox": [ + 45, + 55, + 301, + 138 + ], + "type": "text", + "content": " (e.g., from Origen [8]), the goal is to generate a unit test " + }, + { + "bbox": [ + 45, + 55, + 301, + 138 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 45, + 55, + 301, + 138 + ], + "type": "text", + "content": " that validates the functional correctness of the design. 
If the design fails to pass the test, we invoke an iterative refinement loop that updates the design and test until it passes or a maximum number of attempts " + }, + { + "bbox": [ + 45, + 55, + 301, + 138 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 45, + 55, + 301, + 138 + ], + "type": "text", + "content": " is reached. We set " + }, + { + "bbox": [ + 45, + 55, + 301, + 138 + ], + "type": "inline_equation", + "content": "T = 5" + }, + { + "bbox": [ + 45, + 55, + 301, + 138 + ], + "type": "text", + "content": " in our experiments." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 45, + 138, + 301, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 138, + 301, + 222 + ], + "spans": [ + { + "bbox": [ + 45, + 138, + 301, + 222 + ], + "type": "text", + "content": "The procedure is powered by a teacher model, GPT-4o-mini, which corresponds to the LLMInvoke calls in Algorithm 1. While stronger models such as GPT-4o or o3-mini may yield better performance, we use GPT-4o-mini in practice because of the large size of the dataset (217,462 examples in Origen) and the high cost associated with repeated API queries to OpenAI models." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 222, + 301, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 222, + 301, + 294 + ], + "spans": [ + { + "bbox": [ + 45, + 222, + 301, + 294 + ], + "type": "text", + "content": "The process begins by prompting the teacher model with the test generation template (Figure 4a), together with a natural language specification and its initial RTL design (e.g., Figure 2a and Figure 2b). The model then produces a candidate unit test (e.g., Figure 3) designed to check whether the design satisfies the intended functionality under simulation." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 294, + 301, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 294, + 301, + 390 + ], + "spans": [ + { + "bbox": [ + 45, + 294, + 301, + 390 + ], + "type": "text", + "content": "The design and test are compiled and simulated using standard Verilog tooling. If the test fails, for example due to a timeout, incorrect output, or another runtime error, we construct a refinement prompt (Figure 4b) that includes the specification, the failing design and test, and the simulation error message (corresponding to the err variable in Algorithm 1). This prompt is then passed to the teacher model, which attempts to fix the issue by making edits to the design, the test, or both." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 390, + 301, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 390, + 301, + 438 + ], + "spans": [ + { + "bbox": [ + 45, + 390, + 301, + 438 + ], + "type": "text", + "content": "The refinement process repeats until the updated design passes simulation or the maximum number of attempts " + }, + { + "bbox": [ + 45, + 390, + 301, + 438 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 45, + 390, + 301, + 438 + ], + "type": "text", + "content": " is reached. Once a design successfully passes its test, the validated triple " + }, + { + "bbox": [ + 45, + 390, + 301, + 438 + ], + "type": "inline_equation", + "content": "(s_i, d_i, t_i)" + }, + { + "bbox": [ + 45, + 390, + 301, + 438 + ], + "type": "text", + "content": " is added to the output dataset " + }, + { + "bbox": [ + 45, + 390, + 301, + 438 + ], + "type": "inline_equation", + "content": "D'" + }, + { + "bbox": [ + 45, + 390, + 301, + 438 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 438, + 301, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 438, + 301, + 510 + ], + "spans": [ + { + "bbox": [ + 45, + 438, + 301, + 510 + ], + "type": "text", + "content": "This strategy enables systematic detection and correction of subtle RTL bugs that cannot be identified through syntax checks alone. By integrating LLM-based test generation and iterative refinement into the dataset construction pipeline, we produce a dataset that is not only syntactically valid but also functionally validated through simulation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 510, + 301, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 510, + 301, + 619 + ], + "spans": [ + { + "bbox": [ + 45, + 510, + 301, + 619 + ], + "type": "text", + "content": "While functional correctness under all possible inputs cannot be guaranteed, the inclusion of unit tests makes our augmented dataset substantially more robust than prior approaches that rely solely on syntactic checking. We view this as a practical and scalable step toward building higher-quality fine-tuning datasets for RTL generation. To assess quality, we manually reviewed 100 randomly sampled examples and found that " + }, + { + "bbox": [ + 45, + 510, + 301, + 619 + ], + "type": "inline_equation", + "content": "92\\%" + }, + { + "bbox": [ + 45, + 510, + 301, + 619 + ], + "type": "text", + "content": " of the generated RTL code correctly matched the corresponding natural language descriptions." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 113, + 633, + 233, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 633, + 233, + 643 + ], + "spans": [ + { + "bbox": [ + 113, + 633, + 233, + 643 + ], + "type": "text", + "content": "IV. 
EXPERIMENTAL SETUP" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 649, + 94, + 659 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 649, + 94, + 659 + ], + "spans": [ + { + "bbox": [ + 45, + 649, + 94, + 659 + ], + "type": "text", + "content": "A. Dataset" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 664, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 664, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 664, + 301, + 750 + ], + "type": "text", + "content": "Following the methodology described in Section III, we construct a fine-tuning dataset comprising 125,777 examples. Each example includes a natural language specification, a corresponding RTL design, and associated unit tests. Table II summarizes key statistics: the specifications contain an average of 247 words (ranging from 116 to 549), RTL implementations average 35 lines of code (ranging from 5 to 225), and unit" + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 322, + 50, + 552, + 119 + ], + "blocks": [ + { + "bbox": [ + 322, + 50, + 552, + 119 + ], + "lines": [ + { + "bbox": [ + 322, + 50, + 552, + 119 + ], + "spans": [ + { + "bbox": [ + 322, + 50, + 552, + 119 + ], + "type": "table", + "html": "
CategoryCountLength
MinMaxAvg
NL specification (words)116549247
Design (lines of RTL)125,777522535
Unit tests (lines of RTL)619755
", + "image_path": "5c4b5c5dd73b0cbcb62a0a21938683d163731bae2003182c9845173afbe9b6cb.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 123, + 565, + 159 + ], + "lines": [ + { + "bbox": [ + 308, + 123, + 565, + 159 + ], + "spans": [ + { + "bbox": [ + 308, + 123, + 565, + 159 + ], + "type": "text", + "content": "TABLE II: Dataset statistics: total number of examples and length distributions for natural language specifications, RTL implementations, and unit tests in the VeriCoder dataset." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 308, + 181, + 565, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 181, + 565, + 217 + ], + "spans": [ + { + "bbox": [ + 308, + 181, + 565, + 217 + ], + "type": "text", + "content": "tests average 55 lines (ranging from 6 to 197). We use the specification-solution pairs from this dataset to train our model, VeriCoder." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 309, + 234, + 427, + 246 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 234, + 427, + 246 + ], + "spans": [ + { + "bbox": [ + 309, + 234, + 427, + 246 + ], + "type": "text", + "content": "B. LoRA Fine-Tuning Setup" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 248, + 565, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 248, + 565, + 357 + ], + "spans": [ + { + "bbox": [ + 307, + 248, + 565, + 357 + ], + "type": "text", + "content": "Following standard practices for LLM fine-tuning, we fine-tune the base model of Qwen2.5-14B-Instruct using Low-Rank Adaptation (LoRA, described in Section II-A), with a rank of 16 and a scaling factor of 32 to all linear projection layers in the transformer. Training is conducted over 3 epochs with a batch size of 40. 
We adopt a constant learning rate of " + }, + { + "bbox": [ + 307, + 248, + 565, + 357 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-5}" + }, + { + "bbox": [ + 307, + 248, + 565, + 357 + ], + "type": "text", + "content": ", paired with a linear decay scheduler and a warm-up ratio of 0.05. The optimizer is used with a weight decay of " + }, + { + "bbox": [ + 307, + 248, + 565, + 357 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 307, + 248, + 565, + 357 + ], + "type": "text", + "content": " and gradient clipping is applied with a maximum norm of 1." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 309, + 373, + 429, + 384 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 373, + 429, + 384 + ], + "spans": [ + { + "bbox": [ + 309, + 373, + 429, + 384 + ], + "type": "text", + "content": "C. Benchmarks and Metrics" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 307, + 388, + 564, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 388, + 564, + 460 + ], + "spans": [ + { + "bbox": [ + 307, + 388, + 564, + 460 + ], + "type": "text", + "content": "Following the evaluation protocol established in prior work [7], [8], we benchmark against VerilogEval [9] and RTLLM [10]. For VerilogEval, we report the standard Pass@k metric with " + }, + { + "bbox": [ + 307, + 388, + 564, + 460 + ], + "type": "inline_equation", + "content": "k \\in \\{1,5,10\\}" + }, + { + "bbox": [ + 307, + 388, + 564, + 460 + ], + "type": "text", + "content": ", which estimates the expected probability that at least one of the top- " + }, + { + "bbox": [ + 307, + 388, + 564, + 460 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 307, + 388, + 564, + 460 + ], + "type": "text", + "content": " generated programs passes all test cases. 
The metric is defined as:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 379, + 464, + 491, + 498 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 379, + 464, + 491, + 498 + ], + "spans": [ + { + "bbox": [ + 379, + 464, + 491, + 498 + ], + "type": "interline_equation", + "content": "\\operatorname {P a s s} @ k = \\mathbb {E} \\left[ 1 - \\frac {\\binom {n - c} {k}}{\\binom {n} {k}} \\right]", + "image_path": "efb2efdf41bf536b56a5cb752b5e9196ea03bb35d4a7d8a0f2ff0503a08de96e.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 501, + 564, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 501, + 564, + 574 + ], + "spans": [ + { + "bbox": [ + 307, + 501, + 564, + 574 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 307, + 501, + 564, + 574 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 307, + 501, + 564, + 574 + ], + "type": "text", + "content": " is the total number of generated programs and " + }, + { + "bbox": [ + 307, + 501, + 564, + 574 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 307, + 501, + 564, + 574 + ], + "type": "text", + "content": " is the number of correct ones. All test cases are manually created by experts who design the benchmarks. In all evaluations, we set " + }, + { + "bbox": [ + 307, + 501, + 564, + 574 + ], + "type": "inline_equation", + "content": "n = 10" + }, + { + "bbox": [ + 307, + 501, + 564, + 574 + ], + "type": "text", + "content": ". For RTLLM, we report both syntax correctness and functional correctness using Pass@5. This evaluation setup aligns with that used in prior work [8]." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 309, + 590, + 419, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 590, + 419, + 601 + ], + "spans": [ + { + "bbox": [ + 309, + 590, + 419, + 601 + ], + "type": "text", + "content": "D. 
Models for Evaluation" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 307, + 604, + 564, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 604, + 564, + 712 + ], + "spans": [ + { + "bbox": [ + 307, + 604, + 564, + 712 + ], + "type": "text", + "content": "We evaluate two groups of models. The first group consists of pretrained-only base models, including OpenAI's latest releases (o4-mini, o3-mini, GPT-4o, GPT-4o-mini), Google's Gemini 2.0 Flash, DeepSeek's R1 and DeepSeek-Coder-7B-v1.5 (the base model used in prior work [8]), Meta's LLaMA2-7B model, and Alibaba's Qwen2.5-14B-Instruct (our base model for fine-tuning). The second group includes fine-tuned models with released weights from prior work: Origen [8], RTLCoder [6], and ChipGPT [27]." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 308, + 712, + 564, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 712, + 564, + 750 + ], + "spans": [ + { + "bbox": [ + 308, + 712, + 564, + 750 + ], + "type": "text", + "content": "To ensure a fair comparison, we use identical input prompts and post-processing scripts across all models. For models released by prior work, we do not adopt their model-specific" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 25, + 563, + 31 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 25, + 563, + 31 + ], + "spans": [ + { + "bbox": [ + 558, + 25, + 563, + 31 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 50, + 560, + 278 + ], + "blocks": [ + { + "bbox": [ + 50, + 50, + 560, + 278 + ], + "lines": [ + { + "bbox": [ + 50, + 50, + 560, + 278 + ], + "spans": [ + { + "bbox": [ + 50, + 50, + 560, + 278 + ], + "type": "table", + "html": "
Model TypeEvaluated ModelVerilogEval V1.0 [9] \n(using pass@k metric)RTLLM V1.1 [10] \n(using pass@5 metric)
Eval-Machine (%)Eval-Human (%)Syntax-VCS (%)Functional (%)
k=1k=5k=10k=1k=5k=10
Base Modelso4-mini-2025-04-1661.967.868.664.366.467.186.272.4
GPT-4o-2024-11-2063.766.567.154.360.462.2100.069.0
GPT-4o-mini-2024-07-1855.762.464.344.751.655.189.765.5
DeepSeek-R165.770.972.062.869.169.979.358.6
o3-mini-2025-01-3166.471.672.062.068.969.969.055.2
Qwen2.5-14B-Instruct47.854.255.235.340.042.369.041.4
Gemini-2.0-flash-00160.362.663.652.157.659.065.534.5
DeepSeek-R1-Distill-Qwen-14B46.264.168.536.751.755.162.134.5
DeepSeek-Coder-7B-v1.544.458.962.925.840.244.948.324.1
LLaMA-2-7B7.015.618.90.42.13.83.40.0
Fine-Tuned Models \n(Prior Work)OriGen [8]35.965.168.522.347.551.951.737.9
RTLCoder-DeepSeek [6]22.051.457.314.735.242.317.210.3
RTLCoder-Mistral [6]17.646.456.612.431.536.53.40.0
ChipGPT-LLaMA3.1-8B-SFT [27]17.646.456.612.431.536.513.80.0
ChipGPT-LLaMA2-SFT-7B [27]0.94.27.70.62.23.86.90.0
Our WorkVeriCoder55.762.964.338.349.251.979.348.3
", + "image_path": "1c236d229de3d510d104e28dbad8d6ae03c635914ee6ce1bf2d818846dae0d09.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 45, + 283, + 564, + 309 + ], + "lines": [ + { + "bbox": [ + 45, + 283, + 564, + 309 + ], + "spans": [ + { + "bbox": [ + 45, + 283, + 564, + 309 + ], + "type": "text", + "content": "TABLE III: RTL code generation performance across models. To ensure a fair comparison, we use the same input prompts and apply identical post-processing scripts, running inference with model weights released by prior work." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 1 + }, + { + "bbox": [ + 45, + 329, + 301, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 329, + 301, + 401 + ], + "spans": [ + { + "bbox": [ + 45, + 329, + 301, + 401 + ], + "type": "text", + "content": "prompts [8] or inference pipelines [6], [27]. Instead, we apply a uniform evaluation script, with the only variable being the model under test. This standardization is critical, as both input formatting and post-processing can significantly affect performance. By controlling these factors, we isolate model capability and enable a fair comparison." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 145, + 416, + 201, + 427 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 416, + 201, + 427 + ], + "spans": [ + { + "bbox": [ + 145, + 416, + 201, + 427 + ], + "type": "text", + "content": "V. RESULTS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 433, + 164, + 443 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 433, + 164, + 443 + ], + "spans": [ + { + "bbox": [ + 45, + 433, + 164, + 443 + ], + "type": "text", + "content": "A. 
Main Evaluation Results" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 449, + 301, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 449, + 301, + 460 + ], + "spans": [ + { + "bbox": [ + 55, + 449, + 301, + 460 + ], + "type": "text", + "content": "Table III shows the results. Our major findings are as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "spans": [ + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "text", + "content": "a) Comparison with prior work: VeriCoder achieves state-of-the-art results across two RTL code generation benchmarks, outperforming all previously released open-source finetuned models. On VerilogEval-Machine, VeriCoder attains a pass@1 accuracy of " + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "inline_equation", + "content": "55.7\\%" + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "text", + "content": ", representing a 19.8 percentage point improvement over the best prior model, OriGen. On VerilogEval-Human, it reaches " + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "inline_equation", + "content": "38.3\\%" + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "text", + "content": ", exceeding OriGen by 16.0 percentage points. Across all evaluated " + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "text", + "content": "-shot settings " + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "inline_equation", + "content": "(k = 1, 5, 10)" + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "text", + "content": ", VeriCoder consistently maintains its lead on the Human split. 
On the RTLLM benchmark, VeriCoder achieves " + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "inline_equation", + "content": "79.3\\%" + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "text", + "content": " syntax correctness and " + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "inline_equation", + "content": "48.3\\%" + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "text", + "content": " functional correctness, surpassing OriGen's " + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "inline_equation", + "content": "51.7\\%" + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "inline_equation", + "content": "37.9\\%" + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "text", + "content": ", respectively. In conclusion, VeriCoder delivers relative improvements of up to " + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "inline_equation", + "content": "71.7\\%" + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "text", + "content": " on VerilogEval and " + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "inline_equation", + "content": "27.4\\%" + }, + { + "bbox": [ + 45, + 461, + 301, + 651 + ], + "type": "text", + "content": " on RTLLM in pass@k accuracy, surpassing the previous state-of-the-art model on both benchmarks." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 45, + 652, + 301, + 750 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 652, + 301, + 750 + ], + "spans": [ + { + "bbox": [ + 45, + 652, + 301, + 750 + ], + "type": "text", + "content": "To better understand the relatively low performance of ChipGPT [27], we examined its outputs in detail. We observed that its generated RTL designs often include module headers that deviate from the given specifications, revealing difficulty in precise instruction following. 
Moreover, its base model, LLaMA2-7B, performs even worse, suggesting that limitations in the instruction-following capabilities of the underlying pretrained model constrain the effectiveness of the fine-tuned" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 329, + 564, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 329, + 564, + 390 + ], + "spans": [ + { + "bbox": [ + 307, + 329, + 564, + 390 + ], + "type": "text", + "content": "variant. For a fair comparison, we do not apply any of the model-specific customized post-processing scripts that attempt to fix syntax or header issues. Instead, we use a standardized evaluation script for all models, extracting Verilog code as-is to ensure consistency." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 407, + 564, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 407, + 564, + 552 + ], + "spans": [ + { + "bbox": [ + 307, + 407, + 564, + 552 + ], + "type": "text", + "content": "b) Effectiveness of our fine-tuning: Starting from Qwen-2.5-14B-Instruct as our base model, VeriCoder delivers substantial gains across VerilogEval. On the VerilogEvalMachine split, pass@1 jumps up by " + }, + { + "bbox": [ + 307, + 407, + 564, + 552 + ], + "type": "inline_equation", + "content": "7.6\\%" + }, + { + "bbox": [ + 307, + 407, + 564, + 552 + ], + "type": "text", + "content": ", pass@5 by " + }, + { + "bbox": [ + 307, + 407, + 564, + 552 + ], + "type": "inline_equation", + "content": "4.0\\%" + }, + { + "bbox": [ + 307, + 407, + 564, + 552 + ], + "type": "text", + "content": ", and pass@10 by " + }, + { + "bbox": [ + 307, + 407, + 564, + 552 + ], + "type": "inline_equation", + "content": "2.1\\%" + }, + { + "bbox": [ + 307, + 407, + 564, + 552 + ], + "type": "text", + "content": ", and VerilogEval-Human reflects the same trend. 
On RTLLM, functional pass@5 is " + }, + { + "bbox": [ + 307, + 407, + 564, + 552 + ], + "type": "inline_equation", + "content": "7\\%" + }, + { + "bbox": [ + 307, + 407, + 564, + 552 + ], + "type": "text", + "content": " higher than its base model. Specifically, VeriCoder even marginally outperforms one of the commercial models, Google's Gemini2.0-flash, on pass@5 and pass@10 metrics of Eval-Machine as well as on RTLLM. Together, these results demonstrate that our fine-tuning process and our validated dataset significantly boost pass@k metrics and semantic correctness in RTL generation." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 568, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 568, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 568, + 564, + 748 + ], + "type": "text", + "content": "c) Model gap remains: Despite the observed improvements, a substantial performance gap persists between VeriCoder and the strongest large models. For instance, o3-mini attains " + }, + { + "bbox": [ + 307, + 568, + 564, + 748 + ], + "type": "inline_equation", + "content": "66.4\\%" + }, + { + "bbox": [ + 307, + 568, + 564, + 748 + ], + "type": "text", + "content": " on VerilogEval Pass@1 compared to VeriCoder's " + }, + { + "bbox": [ + 307, + 568, + 564, + 748 + ], + "type": "inline_equation", + "content": "55.7\\%" + }, + { + "bbox": [ + 307, + 568, + 564, + 748 + ], + "type": "text", + "content": ". DeepSeek-R1 achieves " + }, + { + "bbox": [ + 307, + 568, + 564, + 748 + ], + "type": "inline_equation", + "content": "69.1\\%" + }, + { + "bbox": [ + 307, + 568, + 564, + 748 + ], + "type": "text", + "content": " on human-graded Pass@5, versus VeriCoder's " + }, + { + "bbox": [ + 307, + 568, + 564, + 748 + ], + "type": "inline_equation", + "content": "49.2\\%" + }, + { + "bbox": [ + 307, + 568, + 564, + 748 + ], + "type": "text", + "content": ". 
Commercial LLMs such as GPT-4o reach a perfect " + }, + { + "bbox": [ + 307, + 568, + 564, + 748 + ], + "type": "inline_equation", + "content": "100.0\\%" + }, + { + "bbox": [ + 307, + 568, + 564, + 748 + ], + "type": "text", + "content": " Syntax-VCS validity and " + }, + { + "bbox": [ + 307, + 568, + 564, + 748 + ], + "type": "inline_equation", + "content": "69.0\\%" + }, + { + "bbox": [ + 307, + 568, + 564, + 748 + ], + "type": "text", + "content": " functional correctness, while VeriCoder records " + }, + { + "bbox": [ + 307, + 568, + 564, + 748 + ], + "type": "inline_equation", + "content": "79.3\\%" + }, + { + "bbox": [ + 307, + 568, + 564, + 748 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 307, + 568, + 564, + 748 + ], + "type": "inline_equation", + "content": "48.3\\%" + }, + { + "bbox": [ + 307, + 568, + 564, + 748 + ], + "type": "text", + "content": ", respectively. Despite the performance gap, open-source lightweight models offer compelling advantages. They provide transparency, allow for local deployment, and ensure intellectual property protection, i.e., capabilities that are particularly important for RTL design workflows where security, customizability, and integration into existing toolchains are critical." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 24, + 563, + 31 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 31 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 31 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 50, + 304, + 129 + ], + "blocks": [ + { + "bbox": [ + 47, + 50, + 304, + 129 + ], + "lines": [ + { + "bbox": [ + 47, + 50, + 304, + 129 + ], + "spans": [ + { + "bbox": [ + 47, + 50, + 304, + 129 + ], + "type": "table", + "html": "
ModelVerilogEval [9] (Pass@5)RTLLM [10] (Pass@5)
SyntaxFunc
Qwen2.5-14B-Instruct (base)46.869.041.4
Qwen w/ unvalidated data53.575.944.8
Qwen w/ validated data55.879.348.3
", + "image_path": "05504b8cfde9e73c710b4509186e765d80b7e2614e9c4341ab0ec20f2649812f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 47, + 134, + 299, + 179 + ], + "lines": [ + { + "bbox": [ + 47, + 134, + 299, + 179 + ], + "spans": [ + { + "bbox": [ + 47, + 134, + 299, + 179 + ], + "type": "text", + "content": "TABLE IV: We performed fine-tuning on the same base model using a functionally validated dataset and the functionally unvalidated dataset [8]. We report Pass@5 metrics for all models on two benchmarks." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 206, + 168, + 217 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 206, + 168, + 217 + ], + "spans": [ + { + "bbox": [ + 48, + 206, + 168, + 217 + ], + "type": "text", + "content": "B. Ablation Study of Dataset" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 224, + 300, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 224, + 300, + 319 + ], + "spans": [ + { + "bbox": [ + 47, + 224, + 300, + 319 + ], + "type": "text", + "content": "To assess the impact of dataset quality on RTL code generation, we conduct an ablation study using the same base model, Qwen2.5-14B-Instruct, fine-tuned on two datasets: (1) the unvalidated OriGen dataset from prior work [8], and (2) our newly curated, functionally validated dataset. All factors, including dataset size, fine-tuning hyperparameters, training procedures, and evaluation settings, are held constant to ensure a fair comparison." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "spans": [ + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "text", + "content": "Across all metrics, we observe a consistent improvement as dataset quality increases. 
On the VerilogEval benchmark (covering both Machine and Human subsets), the base model achieves " + }, + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "inline_equation", + "content": "46.8\\%" + }, + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "text", + "content": " Pass@5. Fine-tuning on the unvalidated dataset raises performance to " + }, + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "inline_equation", + "content": "53.5\\%" + }, + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "text", + "content": ", while our validated dataset further improves it to " + }, + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "inline_equation", + "content": "55.8\\%" + }, + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "text", + "content": ". For RTLLM syntax correctness, the trend is similar: " + }, + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "inline_equation", + "content": "69.0\\%" + }, + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "text", + "content": " for the base model, " + }, + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "inline_equation", + "content": "75.9\\%" + }, + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "text", + "content": " for the unvalidated version, and " + }, + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "inline_equation", + "content": "79.3\\%" + }, + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "text", + "content": " when trained on validated data. 
Functional correctness sees even more significant improvement, rising from " + }, + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "inline_equation", + "content": "41.4\\%" + }, + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "text", + "content": " (base) to " + }, + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "inline_equation", + "content": "44.8\\%" + }, + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "text", + "content": " (unvalidated) and ultimately to " + }, + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "inline_equation", + "content": "48.3\\%" + }, + { + "bbox": [ + 47, + 321, + 299, + 451 + ], + "type": "text", + "content": " (validated)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 453, + 299, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 453, + 299, + 501 + ], + "spans": [ + { + "bbox": [ + 47, + 453, + 299, + 501 + ], + "type": "text", + "content": "These results demonstrate that functionally validated data provides more effective supervision than existing unvalidated data. This also underscores the importance of dataset quality in fine-tuning LLMs for RTL code generation." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 526, + 249, + 536 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 526, + 249, + 536 + ], + "spans": [ + { + "bbox": [ + 48, + 526, + 249, + 536 + ], + "type": "text", + "content": "C. Test Passing Rates of Non-Validated Datasets" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 544, + 300, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 544, + 300, + 674 + ], + "spans": [ + { + "bbox": [ + 47, + 544, + 300, + 674 + ], + "type": "text", + "content": "We examine the quality of fine-tuning datasets released by prior work by evaluating their passing rates against our synthetic unit tests generated by the teacher model GPT-4o-mini. 
For each corpus, we randomly sample 1,000 Verilog implementations and apply the test generation and refinement pipeline described in Section III. We then run corresponding unit tests against the original design and measure the proportion of the original designs that successfully pass the generated tests. As shown in Table V, only " + }, + { + "bbox": [ + 47, + 544, + 300, + 674 + ], + "type": "inline_equation", + "content": "24.4\\%" + }, + { + "bbox": [ + 47, + 544, + 300, + 674 + ], + "type": "text", + "content": " examples of the RTLCoder dataset [6] pass our functional tests, while OriGen [8] reaches " + }, + { + "bbox": [ + 47, + 544, + 300, + 674 + ], + "type": "inline_equation", + "content": "53.5\\%" + }, + { + "bbox": [ + 47, + 544, + 300, + 674 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 677, + 299, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 677, + 299, + 748 + ], + "spans": [ + { + "bbox": [ + 47, + 677, + 299, + 748 + ], + "type": "text", + "content": "OriGen's higher pass rate aligns with its stronger code generation results in Table III, hinting at a positive link between dataset validity and downstream performance. These findings highlight the potential value of incorporating functional correctness validation into fine-tuning dataset curation for better RTL code generation." + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 317, + 50, + 557, + 94 + ], + "blocks": [ + { + "bbox": [ + 317, + 50, + 557, + 94 + ], + "lines": [ + { + "bbox": [ + 317, + 50, + 557, + 94 + ], + "spans": [ + { + "bbox": [ + 317, + 50, + 557, + 94 + ], + "type": "table", + "html": "
Prior Datasets# Sampled ExamplesTest Passing (%)
RTLCoder [6]100024.4
OriGen [8]100053.5
", + "image_path": "78eb9c01e8aec3197f5771bc2618afcf24170b28ee21cbb3c8fa98cb91a33acc.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 311, + 99, + 563, + 122 + ], + "lines": [ + { + "bbox": [ + 311, + 99, + 563, + 122 + ], + "spans": [ + { + "bbox": [ + 311, + 99, + 563, + 122 + ], + "type": "text", + "content": "TABLE V: Test passing rates " + }, + { + "bbox": [ + 311, + 99, + 563, + 122 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 311, + 99, + 563, + 122 + ], + "type": "text", + "content": " of datasets released by prior work on a randomly sampled set of 1000 examples." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 10 + }, + { + "bbox": [ + 356, + 145, + 518, + 155 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 356, + 145, + 518, + 155 + ], + "spans": [ + { + "bbox": [ + 356, + 145, + 518, + 155 + ], + "type": "text", + "content": "VI. DISCUSSION AND FUTURE WORK" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 311, + 159, + 563, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 159, + 563, + 289 + ], + "spans": [ + { + "bbox": [ + 311, + 159, + 563, + 289 + ], + "type": "text", + "content": "While VeriCoder, combining unit test generation with feedback-driven refinement, improves the functional correctness of generated RTL code, it does not fully guarantee correctness. Synthetic test cases may fail to capture all possible edge cases. To address this challenge, future work should explore integrating formal verification techniques into the dataset construction pipeline to rigorously ensure the correctness of the generated code. Recent advancements have demonstrated promising results in translating natural language instructions into formal specifications [16], [38], as well as enforcing formal constraints during LLM-based code generation [39]." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 311, + 290, + 563, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 290, + 563, + 409 + ], + "spans": [ + { + "bbox": [ + 311, + 290, + 563, + 409 + ], + "type": "text", + "content": "Moreover, most existing approaches, including VeriCoder, focus on small-scale RTL generation. However, practical hardware development often involves large, repository-level codebases with intricate cross-file dependencies and requirements for long-range context [40]–[42]. Recent work has begun to address these challenges through techniques such as combining fine-tuning with retrieval-augmented RTL code generation [43], [44]. Extending VeriCoder's unit test generation and feedback-directed refinement components to the repository scale will enable LLMs to handle more real-world RTL tasks." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 311, + 410, + 563, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 410, + 563, + 540 + ], + "spans": [ + { + "bbox": [ + 311, + 410, + 563, + 540 + ], + "type": "text", + "content": "Furthermore, reinforcement learning (RL) offers a powerful framework for further optimizing large language models' performance beyond what is achievable through supervised fine-tuning alone. Recent studies have demonstrated the effectiveness of RL in enhancing LLM-based code generation by incorporating diverse forms of feedback, such as test case outcomes, compiler diagnostics, and formal verification results [32], [45], [46]. Building on this progress, future work could investigate applying RL techniques to the VeriCoder dataset, using the accompanying test cases as a feedback signal to iteratively improve RTL code generation quality." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 397, + 555, + 477, + 564 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 397, + 555, + 477, + 564 + ], + "spans": [ + { + "bbox": [ + 397, + 555, + 477, + 564 + ], + "type": "text", + "content": "VII. CONCLUSION" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 311, + 569, + 563, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 569, + 563, + 747 + ], + "spans": [ + { + "bbox": [ + 311, + 569, + 563, + 747 + ], + "type": "text", + "content": "Recent advances in Large Language Models (LLMs) have opened new possibilities for Electronic Design Automation (EDA), particularly in RTL code generation. However, most existing datasets emphasize syntactic validity while overlooking functional correctness, which limits the effectiveness of finetuned models. We introduce VERICODER, a model fine-tuned on a dataset with 125,000 examples that is validated for functional correctness. This dataset is constructed using a feedback-directed refinement pipeline guided by a teacher LLM, which generates and iteratively updates both RTL designs and unit tests until the design passes simulation. The resulting dataset consists of functionally validated triples comprising a natural language specification, an RTL implementation, and a passing test. 
Fine-tuned on this dataset, VERICODER achieves state-of-the-art results on two established RTL benchmarks," + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 25, + 563, + 31 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 25, + 563, + 31 + ], + "spans": [ + { + "bbox": [ + 558, + 25, + 563, + 31 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 55, + 301, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 55, + 301, + 128 + ], + "spans": [ + { + "bbox": [ + 45, + 55, + 301, + 128 + ], + "type": "text", + "content": "yielding relative improvements of up to " + }, + { + "bbox": [ + 45, + 55, + 301, + 128 + ], + "type": "inline_equation", + "content": "71.7\\%" + }, + { + "bbox": [ + 45, + 55, + 301, + 128 + ], + "type": "text", + "content": " on VerilogEval and " + }, + { + "bbox": [ + 45, + 55, + 301, + 128 + ], + "type": "inline_equation", + "content": "27.4\\%" + }, + { + "bbox": [ + 45, + 55, + 301, + 128 + ], + "type": "text", + "content": " on RTLLM. An ablation study confirms the impact of functional validation on model performance, underscoring the importance of high-quality training data. Future work may explore formal verification and reinforcement learning to further advance AI-assisted hardware design." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 129, + 146, + 219, + 156 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 146, + 219, + 156 + ], + "spans": [ + { + "bbox": [ + 129, + 146, + 219, + 156 + ], + "type": "text", + "content": "ACKNOWLEDGMENT" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 164, + 302, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 164, + 302, + 201 + ], + "spans": [ + { + "bbox": [ + 45, + 164, + 302, + 201 + ], + "type": "text", + "content": "We thank Samantha Archer, Yao Hsiao, Mohammad Rahmani Fadiheh and Subhasish Mitra for their discussions. This work was partially supported by a Google Research Award." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 144, + 220, + 203, + 231 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 144, + 220, + 203, + 231 + ], + "spans": [ + { + "bbox": [ + 144, + 220, + 203, + 231 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 239, + 301, + 747 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 52, + 239, + 301, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 239, + 301, + 267 + ], + "spans": [ + { + "bbox": [ + 52, + 239, + 301, + 267 + ], + "type": "text", + "content": "[1] M. Liu, T.-D. Ene, R. Kirby, C. Cheng, N. Pinckney, R. Liang, J. Alben, H. Anand, S. Banerjee, I. Bayraktaroglu et al., \"Chipnemo: Domain-adapted llms for chip design,\" arXiv preprint arXiv:2311.00176, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 267, + 301, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 267, + 301, + 303 + ], + "spans": [ + { + "bbox": [ + 52, + 267, + 301, + 303 + ], + "type": "text", + "content": "[2] L. Chen, Y. Chen, Z. Chu, W. Fang, T.-Y. Ho, R. Huang, Y. Huang, S. Khan, M. Li, X. 
Li et al., \"The dawn of ai-native eda: Opportunities and challenges of large circuit models,\" arXiv preprint arXiv:2403.07257, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 303, + 301, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 303, + 301, + 339 + ], + "spans": [ + { + "bbox": [ + 52, + 303, + 301, + 339 + ], + "type": "text", + "content": "[3] R. Zhong, X. Du, S. Kai, Z. Tang, S. Xu, H.-L. Zhen, J. Hao, Q. Xu, M. Yuan, and J. Yan, \"Llm4eda: Emerging progress in large language models for electronic design automation,\" arXiv preprint arXiv:2401.12224, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 339, + 301, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 339, + 301, + 367 + ], + "spans": [ + { + "bbox": [ + 51, + 339, + 301, + 367 + ], + "type": "text", + "content": "[4] Z. He and B. Yu, “Large language models for eda: Future or mirage?” in Proceedings of the 2024 International Symposium on Physical Design, 2024, pp. 65–66." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 367, + 301, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 367, + 301, + 403 + ], + "spans": [ + { + "bbox": [ + 51, + 367, + 301, + 403 + ], + "type": "text", + "content": "[5] X. Yao, Y. Wang, X. Li, Y. Lian, R. Chen, L. Chen, M. Yuan, H. Xu, and B. Yu, \"Rtlwriter: Methodologies for large models aided rtl code optimization,\" in Proceedings of the 43rd IEEE/ACM International Conference on Computer-Aided Design, 2024, pp. 1-7." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 403, + 301, + 439 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 403, + 301, + 439 + ], + "spans": [ + { + "bbox": [ + 51, + 403, + 301, + 439 + ], + "type": "text", + "content": "[6] S. Liu, W. Fang, Y. Lu, J. Wang, Q. Zhang, H. Zhang, and Z. 
Xie, \"Rtlcoder: Fully open-source and efficient ltm-assisted rtl code generation technique,\" IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 439, + 301, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 439, + 301, + 476 + ], + "spans": [ + { + "bbox": [ + 51, + 439, + 301, + 476 + ], + "type": "text", + "content": "[7] S. Liu, W. Fang, Y. Lu, Q. Zhang, H. Zhang, and Z. Xie, \"Rtlcoder: Outperforming gpt-3.5 in design rtl generation with our open-source dataset and lightweight solution,\" in 2024 IEEE LLM Aided Design Workshop (LAD). IEEE, 2024, pp. 1-5." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 51, + 476, + 301, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 476, + 301, + 511 + ], + "spans": [ + { + "bbox": [ + 51, + 476, + 301, + 511 + ], + "type": "text", + "content": "[8] F. Cui, C. Yin, K. Zhou, Y. Xiao, G. Sun, Q. Xu, Q. Guo, D. Song, D. Lin, X. Zhang et al., \"Origen: Enhancing rtl code generation with code-to-code augmentation and self-reflection,\" arXiv preprint arXiv:2407.16237, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 511, + 301, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 511, + 301, + 548 + ], + "spans": [ + { + "bbox": [ + 51, + 511, + 301, + 548 + ], + "type": "text", + "content": "[9] M. Liu, N. Pinckney, B. Khailany, and H. Ren, \"Veriloggeval: Evaluating large language models for verilog code generation,\" in 2023 IEEE/ACM International Conference on Computer Aided Design (ICCAD). IEEE, 2023, pp. 1-8." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 548, + 301, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 548, + 301, + 584 + ], + "spans": [ + { + "bbox": [ + 47, + 548, + 301, + 584 + ], + "type": "text", + "content": "[10] Y. Lu, S. Liu, Q. 
Zhang, and Z. Xie, \"Rtllm: An open-source benchmark for design rtl generation with large language model,\" in 2024 29th Asia and South Pacific Design Automation Conference (ASP-DAC). IEEE, 2024, pp. 722-727." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 584, + 301, + 612 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 584, + 301, + 612 + ], + "spans": [ + { + "bbox": [ + 47, + 584, + 301, + 612 + ], + "type": "text", + "content": "[11] Y. Tsai, M. Liu, and H. Ren, \"Rtlfixer: Automatically fixing rtI syntax errors with large language model,\" in Proceedings of the 61st ACM/IEEE Design Automation Conference, 2024, pp. 1-6." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 612, + 301, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 612, + 301, + 639 + ], + "spans": [ + { + "bbox": [ + 47, + 612, + 301, + 639 + ], + "type": "text", + "content": "[12] Y. Liao, T. Adegbija, and R. Lysecky, \"Are llms any good for high-level synthesis?\" in Proceedings of the 43rd IEEE/ACM International Conference on Computer-Aided Design, 2024, pp. 1-8." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 639, + 301, + 675 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 639, + 301, + 675 + ], + "spans": [ + { + "bbox": [ + 47, + 639, + 301, + 675 + ], + "type": "text", + "content": "[13] Y. Fu, Y. Zhang, Z. Yu, S. Li, Z. Ye, C. Li, C. Wan, and Y. C. Lin, \"Gpt4aigchip: Towards next-generation ai accelerator design automation via large language models,\" in 2023 IEEE/ACM International Conference on Computer Aided Design (ICCAD). IEEE, 2023, pp. 1-9." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 675, + 301, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 675, + 301, + 712 + ], + "spans": [ + { + "bbox": [ + 47, + 675, + 301, + 712 + ], + "type": "text", + "content": "[14] Z. Yan, Y. Qin, X. S. Hu, and Y. 
Shi, \"On the viability of using llms for sw/hw co-design: An example in designing cim dnn accelerators,\" in 2023 IEEE 36th International System-on-Chip Conference (SOCC). IEEE, 2023, pp. 1-6." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "spans": [ + { + "bbox": [ + 47, + 712, + 301, + 747 + ], + "type": "text", + "content": "[15] Z. Liang, J. Cheng, R. Yang, H. Ren, Z. Song, D. Wu, X. Qian, T. Li, and Y. Shi, \"Unleashing the potential of llms for quantum computing: A study in quantum architecture design,\" arXiv preprint arXiv:2307.08191, 2023." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 56, + 564, + 747 + ], + "type": "list", + "angle": 0, + "index": 42, + "blocks": [ + { + "bbox": [ + 310, + 56, + 564, + 93 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 56, + 564, + 93 + ], + "spans": [ + { + "bbox": [ + 310, + 56, + 564, + 93 + ], + "type": "text", + "content": "[16] M. Cosler, C. Hahn, D. Mendoza, F. Schmitt, and C. Trippel, \"nl2spec: Interactively translating unstructured natural language to temporal logics with large language models,\" in International Conference on Computer Aided Verification. Springer, 2023, pp. 383-396." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 310, + 93, + 564, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 93, + 564, + 129 + ], + "spans": [ + { + "bbox": [ + 310, + 93, + 564, + 129 + ], + "type": "text", + "content": "[17] C. Sun, C. Hahn, and C. Trippel, \"Towards improving verification productivity with circuit-aware translation of natural language to systemverilog assertions,\" in First International Workshop on Deep Learning-aided Verification, 2023." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 310, + 129, + 564, + 164 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 129, + 564, + 164 + ], + "spans": [ + { + "bbox": [ + 310, + 129, + 564, + 164 + ], + "type": "text", + "content": "[18] H. Wu, Z. He, X. Zhang, X. Yao, S. Zheng, H. Zheng, and B. Yu, \"Chateda: A large language model powered autonomous agent for eda,\" IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 310, + 164, + 564, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 164, + 564, + 191 + ], + "spans": [ + { + "bbox": [ + 310, + 164, + 564, + 191 + ], + "type": "text", + "content": "[19] Z. Xiao, X. He, H. Wu, B. Yu, and Y. Guo, \"Eda-copilot: A ragpowered intelligent assistant for eda tools,\" ACM Transactions on Design Automation of Electronic Systems, 2025." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 191, + 564, + 227 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 191, + 564, + 227 + ], + "spans": [ + { + "bbox": [ + 310, + 191, + 564, + 227 + ], + "type": "text", + "content": "[20] K. Xu, J. Sun, Y. Hu, X. Fang, W. Shan, X. Wang, and Z. Jiang, \"Meic: Re-thinking rtl debug automation using llms,\" in Proceedings of the 43rd IEEE/ACM International Conference on Computer-Aided Design, 2024, pp. 1-9." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 227, + 564, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 227, + 564, + 255 + ], + "spans": [ + { + "bbox": [ + 310, + 227, + 564, + 255 + ], + "type": "text", + "content": "[21] R. Li, L. B. Allal, Y. Zi, N. Muennighoff, D. Kocetkov, C. Mou, M. Marone, C. Akiki, J. Li, J. Chim et al., \"Starcoder: may the source be with you!\" arXiv preprint arXiv:2305.06161, 2023." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 255, + 564, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 255, + 564, + 281 + ], + "spans": [ + { + "bbox": [ + 310, + 255, + 564, + 281 + ], + "type": "text", + "content": "[22] A. Lozhkov, R. Li, L. B. Allal, F. Cassano, J. Lamy-Poirier, N. Tazi, A. Tang, D. Pykhtar, J. Liu, Y. Wei et al., \"Starcoder 2 and the stack v2: The next generation,\" arXiv preprint arXiv:2402.19173, 2024." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 281, + 564, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 281, + 564, + 308 + ], + "spans": [ + { + "bbox": [ + 310, + 281, + 564, + 308 + ], + "type": "text", + "content": "[23] E. Dehaerne, B. Dey, S. Halder, and S. De Gendt, “A deep learning framework for verilog autocompletion towards design and verification automation,” arXiv preprint arXiv:2304.13840, 2023." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 308, + 564, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 308, + 564, + 335 + ], + "spans": [ + { + "bbox": [ + 310, + 308, + 564, + 335 + ], + "type": "text", + "content": "[24] Z. Pei, H.-L. Zhen, M. Yuan, Y. Huang, and B. Yu, \"Betterv: Controlled verilog generation with discriminative guidance,\" arXiv preprint arXiv:2402.03375, 2024." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 335, + 564, + 371 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 335, + 564, + 371 + ], + "spans": [ + { + "bbox": [ + 310, + 335, + 564, + 371 + ], + "type": "text", + "content": "[25] S. Thakur, B. Ahmad, Z. Fan, H. Pearce, B. Tan, R. Karri, B. Dolan-Gavitt, and S. Garg, \"Benchmarking large language models for automated verilog RTL code generation,\" in 2023 Design, Automation & Test in Europe Conference & Exhibition (DATE). IEEE, 2023, pp. 1-6." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 371, + 564, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 371, + 564, + 407 + ], + "spans": [ + { + "bbox": [ + 310, + 371, + 564, + 407 + ], + "type": "text", + "content": "[26] S. Thakur, B. Ahmad, H. Pearce, B. Tan, B. Dolan-Gavitt, R. Karri, and S. Garg, \"Verigen: A large language model for verilog code generation,\" ACM Transactions on Design Automation of Electronic Systems, vol. 29, no. 3, pp. 1-31, 2024." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 407, + 564, + 452 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 407, + 564, + 452 + ], + "spans": [ + { + "bbox": [ + 310, + 407, + 564, + 452 + ], + "type": "text", + "content": "[27] K. Chang, K. Wang, N. Yang, Y. Wang, D. Jin, W. Zhu, Z. Chen, C. Li, H. Yan, Y. Zhou et al., \"Data is all you need: Finetuning llms for chip design via an automated design-data augmentation framework,\" in Proceedings of the 61st ACM/IEEE Design Automation Conference, 2024, pp. 1-6." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 452, + 564, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 452, + 564, + 479 + ], + "spans": [ + { + "bbox": [ + 310, + 452, + 564, + 479 + ], + "type": "text", + "content": "[28] E. J. Hu, Y. Shen, P. Wallis, Z. Allen-Zhu, Y. Li, S. Wang, L. Wang, W. Chen et al., “Lora: Low-rank adaptation of large language models.” *ICLR*, vol. 1, no. 2, p. 3, 2022." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 479, + 564, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 479, + 564, + 523 + ], + "spans": [ + { + "bbox": [ + 310, + 479, + 564, + 523 + ], + "type": "text", + "content": "[29] M. Liu, Y.-D. Tsai, W. Zhou, and H. 
Ren, \"Craftrtl: High-quality synthetic data generation for verilog code models with correct-by-construction non-textual representations and targeted code repair,\" ArXiv, vol. abs/2409.12993, 2024. [Online]. Available: https://api_semanticscholar.org/CorpusID:272770433" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 310, + 523, + 564, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 523, + 564, + 550 + ], + "spans": [ + { + "bbox": [ + 310, + 523, + 564, + 550 + ], + "type": "text", + "content": "[30] Y. Zhang, Z. Yu, Y. Fu, C. Wan, and Y. C. Lin, \"Mg-verilog: Multi-grained dataset towards enhanced llm-assisted verilog generation,\" in 2024 IEEE LLM Aided Design Workshop (LAD). IEEE, 2024, pp. 1-5." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 310, + 550, + 564, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 550, + 564, + 577 + ], + "spans": [ + { + "bbox": [ + 310, + 550, + 564, + 577 + ], + "type": "text", + "content": "[31] E. Goh, M. Xiang, I. Wey, T. H. Teo et al., “From english to asi: Hardware implementation with large language model,” arXiv preprint arXiv:2403.07039, 2024." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 310, + 577, + 564, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 577, + 564, + 613 + ], + "spans": [ + { + "bbox": [ + 310, + 577, + 564, + 613 + ], + "type": "text", + "content": "[32] S. Liu, Y. Lu, W. Fang, M. Li, and Z. Xie, \"Openllm-rtl: Open dataset and benchmark for llm-aided design rtl generation,\" in Proceedings of the 43rd IEEE/ACM International Conference on Computer-Aided Design, 2024, pp. 1-9." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 310, + 613, + 564, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 613, + 564, + 649 + ], + "spans": [ + { + "bbox": [ + 310, + 613, + 564, + 649 + ], + "type": "text", + "content": "[33] C. Deng, Y.-D. 
Tsai, G.-T. Liu, Z. Yu, and H. Ren, \"Scalertl: Scaling llms with reasoning data and test-time compute for accurate rtl code generation,\" ArXiv, vol. abs/2506.05566, 2025. [Online]. Available: https://api-semanticscholar.org/CorpusID:279243692" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 310, + 649, + 564, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 649, + 564, + 685 + ], + "spans": [ + { + "bbox": [ + 310, + 649, + 564, + 685 + ], + "type": "text", + "content": "[34] Y. Liu, C. Xu, Y. Zhou, Z. Li, and Q. Xu, \"Deeprl: Bridging verilog understanding and generation with a unified representation model,\" ArXiv, vol. abs/2502.15832, 2025. [Online]. Available: https://api-semanticscholar.org/CorpusID:276574886" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 310, + 685, + 564, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 685, + 564, + 721 + ], + "spans": [ + { + "bbox": [ + 310, + 685, + 564, + 721 + ], + "type": "text", + "content": "[35] N. Wang, B. Yao, J. Zhou, X. Wang, Z. Jiang, and N. Guan, \"Large language model for verilog generation with golden code feedback,\" ArXiv, vol. abs/2407.18271, 2024. [Online]. Available: https://api_semanticscholar.org/CorpusID:271516462" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 310, + 720, + 564, + 747 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 720, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 310, + 720, + 564, + 747 + ], + "type": "text", + "content": "[36] M. Gao, J. Zhao, Z. Lin, W. Ding, X. Hou, Y. Feng, C. Li, and M. 
Guo, \"Autovcoder: A systematic framework for automated verilog code generation using llms,\" 2024 IEEE 42nd International Conference" + } + ] + } + ], + "index": 41 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 558, + 24, + 563, + 31 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 24, + 563, + 31 + ], + "spans": [ + { + "bbox": [ + 558, + 24, + 563, + 31 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 57, + 301, + 388 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 64, + 57, + 301, + 75 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 57, + 301, + 75 + ], + "spans": [ + { + "bbox": [ + 64, + 57, + 301, + 75 + ], + "type": "text", + "content": "on Computer Design (ICCD), pp. 162-169, 2024. [Online]. Available: https://api(semanticscholar.org/CorpusID:271516210" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 47, + 76, + 301, + 120 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 76, + 301, + 120 + ], + "spans": [ + { + "bbox": [ + 47, + 76, + 301, + 120 + ], + "type": "text", + "content": "[37] Y. Zhao, D. Huang, C. Li, P. Jin, Z. Nan, T. Ma, L. Qi, Y. Pan, Z. Zhang, R. Zhang, X. Zhang, Z. Du, Q. Guo, X. Hu, and Y. Chen, \"Codev: Empowering llms withhdl generation through multi-level summarization,\" 2024. [Online]. Available: https://api_semanticscholar.org/CorpusID:271212791" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 120, + 301, + 155 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 120, + 301, + 155 + ], + "spans": [ + { + "bbox": [ + 47, + 120, + 301, + 155 + ], + "type": "text", + "content": "[38] D. Mendoza, C. Hahn, and C. 
Trippel, \"Translating natural language to temporal logics with large language models and model checkers,\" in 2024 Formal Methods in Computer-Aided Design (FMCAD), 2024, pp. 1-11." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 156, + 301, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 156, + 301, + 182 + ], + "spans": [ + { + "bbox": [ + 47, + 156, + 301, + 182 + ], + "type": "text", + "content": "[39] P. Aggarwal, B. Parno, and S. Welleck, \"Alphaverus: Bootstrapping formally verified code generation through self-improving translation and treefinement,\" arXiv preprint arXiv:2412.06176, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 182, + 301, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 182, + 301, + 209 + ], + "spans": [ + { + "bbox": [ + 47, + 182, + 301, + 209 + ], + "type": "text", + "content": "[40] C. E. Jimenez, J. Yang, A. Wettig, S. Yao, K. Pei, O. Press, and K. Narasimhan, \"Swe-bench: Can language models resolve real-world github issues?\" arXiv preprint arXiv:2310.06770, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 209, + 301, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 209, + 301, + 245 + ], + "spans": [ + { + "bbox": [ + 47, + 209, + 301, + 245 + ], + "type": "text", + "content": "[41] T. Suresh, R. G. Reddy, Y. Xu, Z. Nussbaum, A. Mulyar, B. Duderstadt, and H. Ji, \"Cornstack: High-quality contrastive data for better code retrieval and reranking,\" in The Thirteenth International Conference on Learning Representations, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 245, + 301, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 245, + 301, + 272 + ], + "spans": [ + { + "bbox": [ + 47, + 245, + 301, + 272 + ], + "type": "text", + "content": "[42] N. Jain, M. Shetty, T. Zhang, K. Han, K. Sen, and I. 
Stoica, “R2e: Turning any github repository into a programming agent environment,” in ICML, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 272, + 301, + 299 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 272, + 301, + 299 + ], + "spans": [ + { + "bbox": [ + 47, + 272, + 301, + 299 + ], + "type": "text", + "content": "[43] P. Wu, N. Guo, J. Lv, X. Xiao, and X. Ye, \"RtlrepEncoder: Repository-level rtl code completion through the combination of fine-tuning and retrieval augmentation,\" arXiv preprint arXiv:2504.08862, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 299, + 301, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 299, + 301, + 334 + ], + "spans": [ + { + "bbox": [ + 47, + 299, + 301, + 334 + ], + "type": "text", + "content": "[44] Z. Li, C. Xu, Z. Shi, Z. Peng, Y. Liu, Y. Zhou, L. Zhou, C. Ma, J. Zhong, X. Wang et al., \"Deepcircuits: A comprehensive repository-level dataset for rtl code understanding, generation, and ppa analysis,\" arXiv preprint arXiv:2502.18297, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 334, + 301, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 334, + 301, + 361 + ], + "spans": [ + { + "bbox": [ + 47, + 334, + 301, + 361 + ], + "type": "text", + "content": "[45] N. Wang, B. Yao, J. Zhou, X. Wang, Z. Jiang, and N. Guan, “Large language model for verilog generation with golden code feedback,” arXiv preprint arXiv:2407.18271, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 361, + 301, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 361, + 301, + 388 + ], + "spans": [ + { + "bbox": [ + 47, + 361, + 301, + 388 + ], + "type": "text", + "content": "[46] J. Wang, Z. Zhang, Y. He, Y. Song, T. Shi, Y. Li, H. Xu, K. Wu, G. Qian, Q. 
Chen et al., “Enhancing code llms with reinforcement learning in code generation,” arXiv preprint arXiv:2412.20367, 2024." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "spans": [ + { + "bbox": [ + 555, + 25, + 563, + 32 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15714/5ed595a1-6284-4695-8ba1-1bc55e6429ae_content_list.json b/data/2025/2504_15xxx/2504.15714/5ed595a1-6284-4695-8ba1-1bc55e6429ae_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..a3b323d2b041654ba1ad1294fcfecc5768a06b42 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/5ed595a1-6284-4695-8ba1-1bc55e6429ae_content_list.json @@ -0,0 +1,1244 @@ +[ + { + "type": "text", + "text": "Autonomous Control of Redundant Hydraulic Manipulator Using Reinforcement Learning with Action Feedback", + "text_level": 1, + "bbox": [ + 122, + 87, + 877, + 137 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Rohit Dhakate1, Christian Brommer1, Christoph Böhm1, Harald Gietler2, Stephan Weiss1, and Jan Steinbrener1", + "bbox": [ + 81, + 172, + 910, + 191 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract- This article presents an entirely data-driven approach for autonomous control of redundant manipulators with hydraulic actuation. The approach only requires minimal system information, which is inherited from a simulation model. The non-linear hydraulic actuation dynamics are modeled using actuator networks from the data gathered during the manual operation of the manipulator to effectively emulate the real system in a simulation environment. 
A neural network control policy for autonomous control, based on end-effector (EE) position tracking is then learned using Reinforcement Learning (RL) with Ornstein-Uhlenbeck process noise (OUNoise) for efficient exploration. The RL agent also receives feedback based on supervised learning of the forward kinematics which facilitates selecting the best suitable action from exploration. The control policy directly provides the joint variables as outputs based on provided target EE position while taking into account the system dynamics. The joint variables are then mapped to the hydraulic valve commands, which are then fed to the system without further modifications. The proposed approach is implemented on a scaled hydraulic forwarder crane with three revolute and one prismatic joint to track the desired position of the EE in 3-Dimensional (3D) space. With the emulated dynamics and extensive learning in simulation, the results demonstrate the feasibility of deploying the learned controller directly on the real system.", + "bbox": [ + 84, + 233, + 488, + 549 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "I. INTRODUCTION", + "text_level": 1, + "bbox": [ + 218, + 561, + 352, + 575 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hydraulic cranes are versatile heavy-duty manipulators that are omnipresent in construction, mining, agriculture, or forestry for lifting and transporting heavy objects. Automation by sensor retrofitting of these manipulators tackles not only challenging and dull, dangerous, dirty (DDD) tasks concerning the handling of raw materials but also brings economic benefits by increased productivity, and effortless system upgrades according to the desired functionality.", + "bbox": [ + 81, + 585, + 488, + 705 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "With the proposed approach, we are addressing the forest log transportation use case. 
The manipulator repeatedly performs a monotonous pick-and-place operation to collect and redistribute logs prepared by the harvester. Forwarder cranes mainly remain manually operated, despite continuous widespread automation in the industry. Manual operation", + "bbox": [ + 81, + 705, + 488, + 797 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/50a129a1473f93925858127e715013f22fb426c9ed9b317a9fba1f66608fdc8e.jpg", + "image_caption": [ + "Fig. 1. AutoLOG manipulator (1:5 scaled forest forwarder crane): Test-bed for our RL-based controller and manipulation tasks." + ], + "image_footnote": [], + "bbox": [ + 511, + 227, + 910, + 444 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "of such manipulators can be both mentally and physically exhausting, when producing constant, smooth and jerk free motion with joystick, since it requires complex coordination of several hydraulic cylinders [1]. Early automatic and semi-automatic solutions were presented by [2] using analytical methods.", + "bbox": [ + 504, + 494, + 911, + 585 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The barriers in automation of the forest industry can be traced not only towards complex and dynamic environments but also the complexity and variants of the manipulators depending on the manufacturer. In [1] the authors argue that the automation of the entire forwarding operation is complex as numerous tasks such as log recognition, log grasping point detection/selection and pick-and-place operations are involved. However, the authors conclude that the motion patterns of the manipulator's joints are, as expected, highly repetitive and can be automated using analytical methods. 
However, for analytical methods, an accurate system and environment model is of utmost necessity to achieve desired results, which could take a lot of effort and time given the complexity and redundant nature of the manipulator.", + "bbox": [ + 504, + 585, + 913, + 797 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advancements in reinforcement learning not only demonstrated their applications in video-games and simulations but also enabled physical robots to learn complex skills and perform operations in real-world environments. In robot manipulation, reinforcement learning is being extensively used to develop intelligent systems that only require minimal to no system and environment information.", + "bbox": [ + 504, + 797, + 913, + 902 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Rohit Dhakate, Christian Brommer, Christoph Böhm, Stephan Weiss and Jan Steinbrenner are with the Department of Smart Systems Technologies in the Control of Networked Systems Group, University of Klagenfurt, 9020 Klagenfurt, Austria {rohit.dhakate, christian.brommer, christoph.boehm, stephan.weiss, jan.steinbrenner}@ieee.org \n $^{2}$ Harald Gietler is with the Department of Smart Systems Technologies in the Sensors and Actuators Group, University of Klagenfurt, 9020 Klagenfurt, Austria {harald.gietler}@aau.at \nPre-print version, accepted June/2022, DOI follows ASAP ©IEEE.", + "bbox": [ + 81, + 810, + 488, + 926 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.15714v1 [cs.RO] 22 Apr 2025", + "bbox": [ + 22, + 262, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "A. Related Work", + "text_level": 1, + "bbox": [ + 83, + 66, + 200, + 79 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Autonomous control for forest cranes has been extensively researched for the last two decades. 
In [3], the authors modeled the system dynamics using differential equations and applied non-linear control laws, and then performed a calibration and control tuning. While [4] also focuses on the aspect of forest crane automation, in addition to compensation for actuator nonlinearities, their main focus is on automating only the base joint (slewing motion). Until recently, all the work done towards automating forest cranes relied on model-based control. Current advancements in artificial intelligence (AI) brought substantial simplifications and advantages in tackling complex systems and problems. Within AI, RL algorithms that can be developed in a model-free domain have attracted several researchers and drove the field of automating heavy machinery with the use of AI.", + "bbox": [ + 81, + 85, + 488, + 311 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Several RL algorithms have been proposed to solve dynamic physical models in recent years. Among which model-free algorithms gained keen interest due to their nature of generalizing a solution to a category of a problem. In model-free methods, Q-learning based algorithms such as Deep Q-Network(DQN) [5], Quantile Regression DQN (QR-DQN) [6], learns the action-value function $Q(s,a)$ which is the expected value (cumulative discounted reward) of doing an action $a$ in state $s$ and then following the optimal policy, which is deterministic. Whereas Policy optimization-based algorithms such as Policy gradients, Advantage Actor-Critic (A2C)/ Asynchronous Advantage Actor-Critic (A3C) [7], Proximal Policy Optimization (PPO) [8], and Trust Region Policy Optimization (TRPO) [9], the agent learns directly the policy function that maps state to action. The policy is determined without using a value function. In recent years the application of RL for complex manipulation tasks has been carried out by several researchers. In [10] the authors implemented a TRPO algorithm for automating a hydraulic excavator. 
The learned control policy is validated by deploying it on the actual excavator. However, they do not control the base joint which limits the motion in 2D. The authors in [11] use PPO for learning a control policy, along with curriculum learning for grasping tasks. An energy optimization goal is also added in the reward function. However, the validation of the learned policy is conducted on the same simulation platform on which it had been trained.", + "bbox": [ + 81, + 311, + 488, + 718 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "B. Contribution", + "text_level": 1, + "bbox": [ + 83, + 726, + 194, + 739 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To the best of our knowledge, we present the first work for automation of a real forestry crane with artificial intelligence. Our work investigates the feasibility of applying an actuator-space control policy learned in simulation on a real-world, 4 degrees of freedom, kinematically redundant forestry crane manipulator. The learned control policy maps task-space goals directly to actuator-space commands by providing the target's cartesian position.", + "bbox": [ + 81, + 744, + 488, + 864 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We propose a generalized framework for autonomous control of redundant manipulators with highly non-linear hydraulic actuation. 
The main contributions of the proposed work are listed below,", + "bbox": [ + 81, + 866, + 488, + 926 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Fully data driven approach for position tracking controller of redundant hydraulic manipulator, with minimal system information, negating the need for analytical formulation of forward and inverse kinematics, which is a highly complex task with non-standard manipulators and is subject to change with manipulator models.", + "- Emulated hydraulic actuation dynamics to precisely map from cylinder displacement to joint angles and vice-versa, eliminating the need for formulating the cylinder-joint mapping using geometry.", + "- Improvement on baseline RL controller, with feedback to predicted actions from forward kinematics network using supervised learning, which directly outputs valve commands for the required target EE position.", + "- A Sim-2-Real deployment of simulation learnt control policy onto real manipulator directly without any adaptation. To the best of our knowledge, this is the first time a Sim-2-Real transfer of RL control policy is deployed on a heavy duty manipulator for 3D position tracking in real-world. The controller performs well in tracking circular and helical trajectories both in simulation and real-world experiments." + ], + "bbox": [ + 522, + 66, + 911, + 397 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "II. SYSTEM DESCRIPTION", + "text_level": 1, + "bbox": [ + 599, + 406, + 818, + 420 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The Autonomous Log Ordering through Robotic Grasping (AutoLOG) manipulator, which is a 1:5 scaled-down model of an actual forest forwarder crane, is used as a test-bed for autonomous manipulation tasks (see Fig 1). The manipulator is powered using hydraulic cylinders for its joint motions. 
With controllable 5 degrees of freedom, the EE can be controlled for its 3D position and yaw angle, making the manipulator redundant in nature. However, for our learning-based control task, we omit the yaw component and only focus on the 3D position of the EE. The yaw parameter of the system is application dependent, such as aligning the yaw with respect to the log orientation for pick-and-place tasks. A description of manipulator configuration is shown in Fig 2.", + "bbox": [ + 504, + 425, + 913, + 623 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/63c3b1d87892ad4296b482afa68b8dcc1c2a1f5e2da7c3c4954380f200625a47.jpg", + "image_caption": [ + "Fig. 2. Manipulator description: The figure displays the kinematic configuration of the manipulator. The manipulator has 4 revolute and 1 prismatic joint. All the joints in addition to the grapple are actuated using hydraulic cylinders." + ], + "image_footnote": [], + "bbox": [ + 568, + 645, + 851, + 851 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) Joint Angles / Displacement Measurements: For forest crane actuators, direct access to the inputs and outputs is not always available. Hence we retrofit our manipulator with exteroceptive sensors. The manipulator comprises three revolute and one prismatic joint. Just before the revolute joint 4, we have 2 orthogonal underactuated joints, which cause the gripper to sway freely in a 3D space. The joint states for revolute joints 2, 3, and prismatic joint are obtained by mapping the cylinder displacements to joint angles. We use Waycon SX50 draw-wire sensors to measure the cylinder displacements with a measurement error of $0.0002\\mathrm{mm}$ over a displacement of $1250\\mathrm{mm}$ . 
For revolute joint 1, we use a retrofitted inductance-based angular position sensor which provides absolute angle measurement with a maximum measurement error of 0.8 degrees [12].", + "2) Electric Control Valves: For autonomous control of the manipulator, the hydraulic proportional valves (electro-hydraulic) are controlled using a Pulse Width Modulation (PWM) control which changes the fluid flow in cylinders according to required joint values.", + "3) Requirements for Approach: Our proposed method requires minimal system information. Table I lists the inputs and outputs of our proposed approach." + ], + "bbox": [ + 99, + 65, + 488, + 444 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/456b6813ef49bd30421e1cdced8dc30bcdf85592d5a2c70c29cb5a80ed7b3e07.jpg", + "table_caption": [ + "TABLEI RL AGENT - INPUTS AND OUTPUTS" + ], + "table_footnote": [], + "table_body": "
ParametersInputsOutput
Joint valuesqtqt+1
Current EE positionXt
Target EE positionXt+1
", + "bbox": [ + 163, + 489, + 405, + 542 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "III. SIMULATION FRAMEWORK", + "text_level": 1, + "bbox": [ + 155, + 571, + 415, + 585 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We use CoppeliaSim (formerly V-REP) [13] as our simulation framework to train the RL agent. CoppeliaSim provides a wide range of functionalities and supports multiple physics engines including Bullet [14], ODE [15], Vortex [16] and Newton [17]. The simulation scene is generated using a Computer Aided Design (CAD) model of the manipulator. The scene is dynamically enabled using Bullet 2.78 physics engine to render our simulation. The simulator provides a kinematics calculation module to compute forward and inverse kinematics of the manipulator chain, however we only use the position information of the scene objects (joints and end-effector) for our observations. Observations from the simulator can be considered as measurement from our retrofitted sensors on the real system. To control the manipulator we use a python remote API Client.", + "bbox": [ + 81, + 594, + 488, + 820 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The simulation model is shown in Fig 3. The gripper (red object) is detached for simulations. Thus our simulation setup does not have the cylinder displacements as control inputs. Instead, the joint variables are provided directly to the simulator. However, the resulting joint variables from the learned controller are converted to cylinder displacements using the actuator network.", + "bbox": [ + 81, + 820, + 488, + 926 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/1918b898feb71111018760b82ae484f8dc74128bde4d2b4ff142827a7d7a3e3b.jpg", + "image_caption": [ + "Fig. 3. Simulation model in CoppeliaSim. The gripper (red object) is detached for training since the effects of gripper sway is out of the scope of the proposed approach." 
+ ], + "image_footnote": [], + "bbox": [ + 547, + 61, + 874, + 277 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "IV. METHODS", + "text_level": 1, + "bbox": [ + 650, + 358, + 767, + 371 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A. Overview", + "text_level": 1, + "bbox": [ + 506, + 387, + 596, + 400 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Fig 4 shows an overview of our approach. We train an actuator and a forward network using supervised learning. The actuator network incorporates the non-linear dynamics involved in the hydraulic actuation and is trained to map cylinder displacement to joint variables and vice-versa. The forward network is a mapping from joint space to operation space of the manipulator. The RL agent (DDPG) is then trained in the simulation to reach a target 3D position from a random initial joint configuration. The trained RL agent is first evaluated for a trajectory tracking task in simulation and then is deployed on the real manipulator for final validation.", + "bbox": [ + 504, + 411, + 911, + 578 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/24de990de12ca065df1ad0249823ff8cc13ebce5db9034346c97011e859835da.jpg", + "image_caption": [ + "Sim-2-Real" + ], + "image_footnote": [], + "bbox": [ + 563, + 606, + 856, + 714 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/a592265274bdaf56173f88c5d5586737e10ee36a9263ef57f9b9fcd2cf99bb42.jpg", + "image_caption": [ + "Fig. 4. RL control architecture: The image shows an architecture overview of our proposed approach. The training is done completely offline on a simulation platform, it shows the interaction between forward network, RL agent and the simulation platform. Sim-2-Real transfer of the trained controller is validated by directly deploying it on the physical system." + ], + "image_footnote": [], + "bbox": [ + 563, + 726, + 854, + 834 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "B. 
Network Modelling", + "text_level": 1, + "bbox": [ + 83, + 66, + 238, + 80 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The two supervised learning networks facilitate our approach of learning based control.", + "bbox": [ + 81, + 85, + 488, + 116 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1) Actuator Network: The actuator network performs a bi-directional mapping between cylinder displacements and joint variables. Our RL agent outputs joint variables for the target goal, whereas low-level manipulator control takes valve commands (cylinder displacements) as control inputs.", + "2) Forward Network: The forward network takes current joint variables as inputs and returns the 3D position of EE." + ], + "bbox": [ + 99, + 119, + 488, + 253 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "C. Data Collection", + "text_level": 1, + "bbox": [ + 83, + 265, + 218, + 277 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "1) Actuator Network Data: For the actuator network, we collected input-output data from the real system during manual operation. We recorded the cylinder displacements using the retrofitted draw-wire sensors, and a motion capture system is used to measure the respective angles, since our system does not have an alternative for direct angle measurement for revolute joints 2 and 3. The cylinder control inputs were provided using a remote control designed for the manipulator. The data is collected with different cylinder velocities to capture the hydraulic actuation dynamics effectively. 
The collected data is believed to be incorporating all the non-linear dynamics involved in the mapping between cylinders and respective angles, see Fig 5.", + "bbox": [ + 101, + 284, + 490, + 496 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/bc475e9736badced79ff37cd0d7b6f2c30391f27b24b10ea198d63521742775c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 107, + 508, + 460, + 595 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/a8a1f8bf9611d114565d29e224f0c989988432c99613cb7176c03ee1f699d59d.jpg", + "image_caption": [ + "Fig. 5. Real data to train the actuator model is gathered from the physical manipulator. The figure shows the joint angles w.r.t. the cylinder displacement. The cylinder displacements are measured using a draw-wire sensor and corresponding joint angles are recorded using motion capture system." + ], + "image_footnote": [], + "bbox": [ + 107, + 602, + 457, + 691 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2) Forward Network Data: To train the forward network, we acquired the joint variables and EE position data autonomously by setting a random joint configuration for each data point and recording the EE position using motion capture, as shown in Fig 6. The collected data also gave an insight into the manipulator work-space.", + "bbox": [ + 99, + 775, + 488, + 867 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "D. Network Training", + "text_level": 1, + "bbox": [ + 83, + 877, + 230, + 891 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "1) Actuator Network Training: We train separate networks for each joint-cylinder mapping. 
Actuator network-2", + "bbox": [ + 99, + 896, + 488, + 926 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/0ff172a1e9706049a3ab566b4fa2dff61b7fbc3e448f9a68de3e2c9da7881ed9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 532, + 60, + 883, + 148 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/dc245afab5ffe8c91cbbb34839c3c74d38ee032740331754c65ec1f3448037f5.jpg", + "image_caption": [ + "Fig. 6. The image displays the real data recorded for forward kinematics in an autonomous fashion, to train our forward network. For each data-point random joint configuration is set and corresponding EE position is recorded using motion capture system. The sampled random configurations covers the full range of cylinder displacements." + ], + "image_footnote": [], + "bbox": [ + 532, + 152, + 883, + 242 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "maps joint2-cylinder2, while actuator network-3 maps joint3-cylinder3. The actuator network-2 is trained using a simple multi-layer perceptron (MLP) with 3 hidden layers (with 256-128-128 hidden units) and non-linear rectified linear unit (ReLU) activation. We used Adam optimizer with a learning rate of 1e-4. The model predicts the cylinder position for a given joint angle. Whereas the actuator network-3 uses an MLP with only 2 hidden layers(with 128-128 hidden units). Fig 7 and 8 shows the validation of trained actuator networks.", + "bbox": [ + 544, + 337, + 913, + 502 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/89769b68188785d41523fe5e36a853662b531cf8c3afc7b6b5218adb3a2c22b3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 532, + 527, + 883, + 616 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/959b617bf95a2fc023392858e28016f6decfa4ef0d154fc7e850bb39d842b26d.jpg", + "image_caption": [ + "Fig. 7. The figure shows validation results of trained actuator network for joint 2. 
Excluding error spikes at few instances the network precisely learned the cylinder-joint mapping." + ], + "image_footnote": [], + "bbox": [ + 537, + 625, + 883, + 710 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2) Forward Network Training: Our forward network is a multi-input-multi-output (MIMO) mapping from joint variables to EE position. The network is trained using an MLP with only 2 hidden layers (with 256-128 hidden units). Despite training the network on only 500 data points, Fig 9 shows that the generalization is very accurate with a maximum prediction error of only (0.0159, 0.0205, 0.0136)m in x, y, and z, respectively.", + "bbox": [ + 522, + 789, + 913, + 912 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/be285313dbfb81aa70b092241b850e1b5bb5c496a3fcb32cd71290df131f0e58.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 109, + 59, + 464, + 148 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/c735e05f9610377714b0e6df852a1b7937f1d8bbd2000a99a2aaf4e48ae95ded.jpg", + "image_caption": [ + "Fig. 8. Validation results of the trained actuator network for joint 3 are shown in this figure." + ], + "image_footnote": [], + "bbox": [ + 117, + 152, + 464, + 243 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/19833e38b857d9913051ebc6cc3f4945e35d8a0150770467823b8209669f8666.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 107, + 284, + 464, + 340 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/bd54f262f80aa7631a79e5f18caf190a7f560cf3957649dcad75541910accba9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 109, + 343, + 462, + 398 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/0de451a910d927eb07699ac09f4a7436643b76c64c8749231ddd37eb061202c5.jpg", + "image_caption": [ + "Fig. 9. Figure shows evaluation of the forward network. 
With a multi-input structure involving 4 joint variables, the network trained very efficiently to return 3D position of the EE." + ], + "image_footnote": [], + "bbox": [ + 127, + 402, + 462, + 467 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "E. Reinforcement Learning Controller", + "text_level": 1, + "bbox": [ + 83, + 523, + 344, + 537 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our proposed learning-based controller uses RL to synthesize a model-free task-space position tracking controller. The RL controller learns the inverse kinematics of the manipulator, which cannot be formulated analytically without any optimization objectives due to the redundant nature of the manipulator.", + "bbox": [ + 81, + 542, + 488, + 633 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Reinforcement Learning Preliminaries:", + "bbox": [ + 99, + 633, + 362, + 648 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We formalized our RL problem as a Markov decision process (MDP), which is a discrete-time stochastic control process. We use MDP, which provides a mathematical framework for predicting outcomes where the environment is fully observable. The MDP is characterized by,", + "bbox": [ + 81, + 648, + 488, + 724 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- state $(s)$ : state of the agent in the environment", + "- action $(a)$ : predicted/ conducted action by the agent", + "- reward $(r)$ : a scalar valued reward based on performed action and achieved state", + "- policy $(\\pi(s|a))$ : decision making function of state-action pair" + ], + "bbox": [ + 99, + 727, + 486, + 816 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A simple actor-critic architecture is shown in Fig 10.", + "bbox": [ + 83, + 820, + 444, + 835 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "At a given discrete time step $t$ , the state of the system is given by, $s_t \\in S$ . 
The agent makes an observation of the environment $o_t \\in O$ . Performing an action $a_t \\in A$ according to the policy distribution $\\pi(a|s)$ , the agent receives an immediate scalar reward $r_t(s_t, a_t)$ according to the specified reward function $R(s, a)$ providing an updated state $s_{t+1}' \\in S$ .", + "bbox": [ + 81, + 835, + 488, + 926 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/200646669cbdd5936b36353cd83b3d28e8d93a92f7ee0ed87039448f6357d32d.jpg", + "image_caption": [ + "Fig. 10. A simple architecture of actor-critic method of RL approach is described. It shows the main operation of any RL based algorithm, with state, action, reward, agent and environment being the main components of an RL algorithm." + ], + "image_footnote": [], + "bbox": [ + 602, + 59, + 820, + 165 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The goal of RL algorithms is to find the optimal policy $\\pi^{*}(a|s)$ , such that the agent takes the optimal action at any given state in order to maximize the expected return. Here, the deep RL approach involves parameterizing the policy $\\pi$ as a neural network $\\pi(\\theta)$ with parameters $\\theta \\in \\Theta$ . The resulting policy approximator outputs a vector of actuator-space control signals at each time step.", + "bbox": [ + 504, + 231, + 911, + 335 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We use DDPG [18] because it combines both Q-learning and policy optimization approaches. DDPG has an actor-critic architecture, where the critic network determines the Q value, and the actor network determines the actions to be taken. The actor network in DDPG simply uses the negative average Q value generated by the critic model as a loss and learns to generate actions to maximize the Q value in each state. An experience replay buffer stores all the experiences and draws a batch to train the networks. 
To the DDPG baseline, we added feedback to the predicted actions using our forward network for efficient exploration. Using the current policy, we predict a specified number of actions, which is then fed to the forward network to find the best actions based on the norm distance between EE position from predicted actions and the target position. The selected action is then used to perform a $(s_t, a_t, r_t, s_{t+1}')$ step to get the next state $s_{t+1}'$ . The contents of our system state and actions are described in Table II.", + "bbox": [ + 504, + 337, + 913, + 609 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/eb870b6c3001c7d2105bed50b781bb7405a0fd0d2b68ffdd67a1ee8ccb5aa83a.jpg", + "table_caption": [ + "TABLE II DDPG ALGORITHM COMPONENTS" + ], + "table_footnote": [], + "table_body": "
ParametersContentsDimension
StateObservation: Joint variables4x1
Achieved goal: Current EE3x1
Desired goal: Target EE3x1
ActionsJoint Variables: [J1, J2, J3, J4]4x1
", + "bbox": [ + 532, + 656, + 883, + 724 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We give a constant reward $r_t^{step}$ for each time-step which improves the learning performance. A distance reward $r_t^{dist}$ which helps to learn the reaching task is given based on the norm distance between current and target EE position. We also add a joint limit avoidance reward $r_t^{jlim}$ which discourages the agent from learning infeasible joint configurations. A complete episode reward is the sum of all the aforementioned reward functions.", + "bbox": [ + 504, + 741, + 913, + 861 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our reward function is defined as follows,", + "bbox": [ + 522, + 877, + 810, + 891 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nr _ {t} = r _ {\\mathrm {t}} ^ {\\text {s t e p}} + r _ {\\mathrm {t}} ^ {\\text {d i s t}} + r _ {\\mathrm {t}} ^ {\\text {j l i m}} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 622, + 893, + 911, + 912 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where,", + "bbox": [ + 99, + 66, + 150, + 79 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} r _ {t} ^ {s t e p} = 0. 0 0 1 \\\\ r _ {t} ^ {d i s t} = - \\left(\\left\\| x _ {t + 1} - x _ {t} \\right\\| _ {2}\\right) + 0. 0 0 2 \\\\ r _ {t} ^ {j l i m} = \\left\\{ \\begin{array}{l l} - 0. 0 0 0 5, & \\text {i f , j > j _ {m a x} o r j < j _ {m i n}} \\\\ 0, & \\text {i f , j _ {m i n} \\geq j \\leq j _ {m a x}} \\end{array} \\right. \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 99, + 90, + 421, + 170 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We start the simulation with random initial joint configuration, and our RL agent acquires observations from the simulation environment which forms our system state. The actor network then generates random actions based on the current state and exploration noise. 
These actions (joint variables as control inputs) are then filtered using the forward network to select the best action, which is then carried out in a simulation step. The simulation is carried out at $100\\mathrm{hz}$ , the same rate our real system is operated.", + "bbox": [ + 81, + 174, + 488, + 309 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The DDPG algorithm is claimed to be sensitive to hyperparameters, which we observed during tuning of the hyperparameters. In [19], it is shown that DDPG with tuned hyperparameters outperforms several other policy optimization algorithms in stable environments. We modified the hyperparameters from the stable baseline parameters to suit our training environment. Table III shows the hyperparameters used for our system.", + "bbox": [ + 81, + 309, + 488, + 431 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/e9a0c0fe0799e18c262aeb1a0ffd97db46212cd8b4e8d774269a4d11c46f6469.jpg", + "table_caption": [ + "TABLE III ALGORITHM HYPERPARAMETERS" + ], + "table_footnote": [], + "table_body": "
ParametersVariableValues
Number of episodesn Episodes1500
Number of stepsn Steps1000
Buffer sizenbuffer1e + 06
Batch sizenbatch1024
Discount factorγ0.99
Soft target updateτ1e - 03
Actor learning ratelrac1e - 03
Critic learning ratelrcr1e - 03
OU Noiseσ0.1
", + "bbox": [ + 148, + 479, + 421, + 604 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our complete approach is described in Algorithm 1.", + "bbox": [ + 98, + 619, + 455, + 636 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "V. RESULTS", + "text_level": 1, + "bbox": [ + 233, + 643, + 336, + 657 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "A. Simulation Results", + "text_level": 1, + "bbox": [ + 81, + 665, + 235, + 679 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We trained two different policies, Policy 1 (with action feedback) and Policy 2 (without action feedback) for 1500 episodes with randomly sampled targets from the manipulator work-space. All the hyper-parameters and simulation parameters are kept identical, with the feedback to the explored actions being the only distinction between the two policies.", + "bbox": [ + 81, + 684, + 488, + 789 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Fig 11 shows the cumulative reward for both the policies during the training episodes. Policy 1 constantly acquires better rewards than Policy 2 for each episode, validating our approach of efficient exploration using action feedback.", + "bbox": [ + 81, + 790, + 488, + 849 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We validated both policies for tracking a helical trajectory. From the trajectory tracking results shown in Fig 12, it can be seen that the tracking accuracy for policy 1 is better than policy 2. The absolute tracking error is shown in 13. The Root Mean Squared Error (RMSE) for Policy 1 is", + "bbox": [ + 81, + 851, + 490, + 926 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/eae163ea51a695c45611d0e685ac65e614c7797a6b91db3920d1340686ed9b34.jpg", + "image_caption": [ + "Fig. 11. Episode training rewards for Policy 1 (with feedback) and Policy 2 (without feedback) are shown. 
It is evident from the figure that Policy 1 is exploring efficiently because of the provided feedback. The feedback assists in selecting a meaningful action exploration." + ], + "image_footnote": [], + "bbox": [ + 532, + 59, + 885, + 244 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "[0.017, 0.008, 0.01], whereas for Policy 2 is [0.031, 0.017, 0.02]. Though we trained our RL agent within a defined actual manipulator work-space, we observed that the learned policy generalized the target reaching task and could perform trajectory tracking even outside the work-space on which it is trained.", + "bbox": [ + 504, + 310, + 913, + 401 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/1766108b659a76a1a9c6a84cf6d74f5cb68b2a8323ec8343f166cc285ac067f1.jpg", + "image_caption": [ + "Fig. 12. In this figure we are comparing the 2 trained policies on a trajectory tracking task in simulation. Policy 1 performs better in tracking the helical trajectory in contrast to Policy 2." + ], + "image_footnote": [], + "bbox": [ + 570, + 411, + 854, + 577 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/282b6dce16d75d3572a07c67522f78d93b534fc7ee04ddcb62da68ef11c01a0d.jpg", + "image_caption": [ + "Fig. 13. The image shows the trajectory tracking error for Policy 1 and Policy 2." + ], + "image_footnote": [], + "bbox": [ + 531, + 647, + 887, + 832 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "B. Real World Experiments", + "text_level": 1, + "bbox": [ + 506, + 876, + 697, + 892 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We deployed Policy 1 directly on the real manipulator without any modifications to the outputs from the learned", + "bbox": [ + 504, + 896, + 913, + 926 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/1ef568fffce5dfb948db8e81ebb41714d7215b2de0a59011d3078cd0b7e6aca7.jpg", + "table_caption": [ + "TABLE IV TRAJECTORY TRACKING ERRORS" + ], + "table_footnote": [], + "table_body": "
ExperimentMax. Error (mm)
Simulation27.2, 14.5, 26.3
Real-World75.2, 80.1, 73.1
", + "bbox": [ + 184, + 97, + 383, + 138 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "policy. We validated our learning-based controller approach in real-world experiments by tracking circular and helical trajectories.", + "bbox": [ + 81, + 167, + 488, + 213 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "As we trained our control policy by detaching the gripper in simulation, we did not account for the dynamic sway of the gripper when the manipulator is in motion, which is currently out of the scope of our proposed approach. The real-world trajectory tracking experiments shows that the manipulator is successfully tracking the target trajectory, see Fig 14, however, the unmodeled and unaccounted sway induced some tracking errors during the motion see 15. Table IV shows the maximum tracking error for the helical trajectory in simulation and real-world using our learned controller.", + "bbox": [ + 81, + 214, + 488, + 378 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2f78940a880e512f37387b0be3e6118bcf2d143d524f78be3a4bc1f6b80a1417.jpg", + "image_caption": [ + "Fig. 14. The figure displays the real-world experiment results of trajectory tracking from deployed Policy 1 onto the manipulator. The tracking is performed well given the harsh dynamic conditions of our system. The results validate the Sim-2-Real transfer of our learning-based control approach." + ], + "image_footnote": [], + "bbox": [ + 109, + 390, + 464, + 609 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/a5ae58c57153f6df9ae9bd5a86843a5a760c1b857b6f492a818443fb6613131c.jpg", + "image_caption": [ + "Fig. 15. The image visualizes the error in tracking of helical trajectory for real-world experiments. As foreseen for the same trajectory, the tracking error of the real experiment is bigger compared to simulation tracking error. However, the results are closely comparable. 
The periodic error is caused by the motion in the y-axis which causes the most sway motion of the gripper." + ], + "image_footnote": [], + "bbox": [ + 109, + 674, + 464, + 857 + ], + "page_idx": 6 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1: DDPG with Action Feedback" + ], + "code_body": "Initialization: \nRandomly initialize both actor and critic networks, \n $\\mu (s|\\theta^{\\mu})\\gets \\theta^{\\mu}$ $Q(s,a|\\theta^{Q})\\gets \\theta^{Q}$ \nInitialize target networks $Q^{\\prime}$ and $\\mu^{\\prime}$ - \n $\\theta^{\\mathcal{Q}^{\\prime}}\\leftarrow \\theta^{\\mathcal{Q}},\\theta^{\\mu^{\\prime}}\\leftarrow \\theta^{\\mu}$ \nInitialize replay buffer \nTraining: \nfor $n = 1,n_{\\text{episodes}}$ do Reset environment \nReceive initial observation state $S$ \nfor $t = 1,n_{\\text{steps}}$ do for $p = 1,n_{\\text{actions}}$ do $a_{p} = \\mu (s_{t}|\\theta^{\\mu}) + \\mathcal{N}_{t}$ / $\\star$ according to current policy and exploration noise $\\star /$ return $[a_{p_1},\\dots ,a_{p_n}]$ BestAction $(Fk_{net},a_p)$ : return $\\leftarrow a_{p_i},|min(x_{target} - x_{p_i})$ Set $a_t\\gets a_{p_i}$ Execute action: $a_{t}$ Observe: reward $r_t$ and new state $s_{t + 1}$ Store transition: $(s_t,a_t,r_t,s_{t + 1})$ in $R$ Sample random batch: from nbatch transitions $(s_i,a_i,r_i,s_{t + i})$ from $R$ Set: $y_{i} = r_{i} + \\gamma Q^{\\prime}(s_{i + 1},\\mu^{\\prime}(s_{i + 1}|\\theta^{\\mu^{\\prime}})|\\theta^{Q^{\\prime}})$ Update critic by minimizing the loss: $L = \\frac{1}{N}\\sum_{i}(y_{i} - Q(s_{i},a_{i}|\\theta^{Q}))^{2}$ Update actor policy using sampled policy gradient: $\\nabla_{\\theta^{\\mu}}J\\approx$ $\\frac{1}{N}\\sum_{i}\\nabla_{a}Q(s,a|\\theta^{Q})|_{s=s_{i},a=\\mu(s_{i})}\\nabla_{\\theta^{\\mu}}\\mu(s|\\theta^{\\mu})|_{s_i}$ Update target networks, $\\theta^{Q^{\\prime}}\\gets \\tau \\theta^{Q} + (1 - \\tau)\\theta^{Q^{\\prime}}$ $\\theta^{\\mu^{\\prime}}\\gets \\tau \\theta^{\\mu} + (1 - 
\\tau)\\theta^{\\mu^{\\prime}}$", + "bbox": [ + 516, + 83, + 903, + 659 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "VI. CONCLUSIONS AND FUTURE WORK", + "text_level": 1, + "bbox": [ + 544, + 694, + 874, + 708 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The presented results demonstrate the direct application of RL to heavy-duty manipulators and the feasibility of directly deploying a control policy entirely learned in simulation to physical forestry cranes. The main advantage of the presented approach is that no mathematical formulation either of kinematics or dynamics is required. For our approach, we do not need the geometry information to acquire the cylinder-joint mapping which is required for the automation of such manipulators. Our approach inherently adapts the actuation dynamics which in general is a complex problem involving numerous external factors. Our controller requires minimal system information which can be easily acquired by retrofitting the manipulator, thus making the automation of such heavy manipulators very efficient and economical. We", + "bbox": [ + 504, + 714, + 913, + 926 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "made use of the available information in a simple and elegant approach to make the controller learning process much more efficient by providing feedback on the exploration actions and choosing the best one among the action candidates. However, our real-world experiment results suffered from tracking errors, mainly due intervening dynamic factors (gripper sway, backlash, actuation inaccuracies) and poorly tuned low-level control. In contrast to these intervening factors and given the fact that our controller is trained only on 1500 data points sampled from the complete manipulator trajectory, the tracking accuracy is remarkable. 
The controller performance can be greatly improved, by training on more data points and providing a finely tuned low-level controller.", + "bbox": [ + 86, + 66, + 486, + 261 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To address the problem of gripper sway, in future work we will extend our framework to integrate the sway motion during the learning process to model a compensating or aggressive control policy. We also plan to incorporate a generalized Long Short Term Memory (LSTM) based backlash model, to also take the backlash motion into account during training. Even though our feedback model facilitates the controller in an efficient exploration and learning, it still contains minor inaccuracies which might be affecting the learning process. A better feedback model will undoubtedly improve the controller performance. For more complex manipulation tasks we plan to use curriculum learning [20], which has been shown to accelerate and improve the learning process.", + "bbox": [ + 86, + 263, + 486, + 458 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 238, + 470, + 333, + 483 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] P. La Hera and D. O. Morales, “What do we observe when we equip a forestry crane with motion sensors?” Croatian Journal of Forest Engineering: Journal for Theory and Application of Forestry Engineering, vol. 40, no. 2, pp. 259–280, 2019.", + "[2] P. L. Hera, U. Mettin, I. R. Manchester, and A. Shiriaev, \"Identification and control of a hydraulic forestry crane,\" IFAC Proceedings Volumes, vol. 41, no. 2, pp. 2306-2311, 2008, 17th IFAC World Congress. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1474667016392941", + "[3] P. La Hera and D. Ortiz Morales, \"Model-based development of control systems for forestry cranes,\" Journal of Control Science and Engineering, vol. 2015, 2015.", + "[4] S. Fodor, C. 
Vázquez, and L. Freidovich, \"Automation of slewing motions for forestry cranes,\" in 2015 15th International Conference on Control, Automation and Systems (ICCAS), 2015, pp. 796-801.", + "[5] V. Mnih, K. Kavukcuoglu, D. Silver, A. A. Rusu, J. Veness, M. G. Bellemare, A. Graves, M. Riedmiller, A. K. Fidjeland, G. Ostrovski, S. Petersen, C. Beattie, A. Sadik, I. Antonoglou, H. King, D. Kumaran, D. Wierstra, S. Legg, and D. Hassabis, \"Human-level control through deep reinforcement learning,\" Nature, vol. 518, no. 7540, pp. 529-533, Feb 2015. [Online]. Available: https://doi.org/10.1038/nature14236", + "[6] W. Dabney, M. Rowland, M. Bellemare, and R. Munos, \"Distributional reinforcement learning with quantile regression,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 32, no. 1, 2018.", + "[7] V. Mnih, A. P. Badia, M. Mirza, A. Graves, T. Lillicrap, T. Harley, D. Silver, and K. Kavukcuoglu, \"Asynchronous methods for deep reinforcement learning,\" in International conference on machine learning. PMLR, 2016, pp. 1928-1937.", + "[8] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov, “Proximal policy optimization algorithms,” arXiv preprint arXiv:1707.06347, 2017.", + "[9] J. Schulman, S. Levine, P. Abbeel, M. Jordan, and P. Moritz, \"Trust region policy optimization,\" in Proceedings of the 32nd International Conference on Machine Learning, ser. Proceedings of Machine Learning Research, F. Bach and D. Blei, Eds., vol. 37. Lille, France: PMLR, 07-09 Jul 2015, pp. 1889-1897. [Online]. Available: https://proceedings.mlr.press/v37/schulman15.html" + ], + "bbox": [ + 91, + 492, + 488, + 926 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[10] P. Egli and M. Hutter, \"Towards rl-based hydraulic excavator automation,\" in 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2020, pp. 2692-2697.", + "[11] J. Andersson, K. Bodin, D. Lindmark, M. 
Servin, and E. Wallin, \"Reinforcement learning control of a forestry crane manipulator,\" in 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2021, pp. 2121-2126.", + "[12] H. Gietler, C. Stetco, and H. Zangl, \"Scalable retrofit angular position sensor system,\" in 2020 IEEE International Instrumentation and Measurement Technology Conference (I2MTC). IEEE, 2020, pp. 1-6.", + "[13] E. Rohmer, S. P. Singh, and M. Freese, \"V-rep: A versatile and scalable robot simulation framework,\" in 2013 IEEE/RSJ International Conference on Intelligent Robots and Systems. IEEE, 2013, pp. 1321-1326.", + "[14] E. Coumans et al., “Bullet real-time physics simulation,” URL http://bulletphysics.org, 2013.", + "[15] R. Smith et al., \"Open dynamics engine,\" 2007.", + "[16] CM-Labs, \"Vortex studio,\" CM Labs, 2020.", + "[17] J. Jerez and A. Suero, “Newton game dynamics,” Open Source Physics Engine, 2008.", + "[18] T. P. Lillicrap, J. J. Hunt, A. Pritzel, N. Heess, T. Erez, Y. Tassa, D. Silver, and D. Wierstra, \"Continuous control with deep reinforcement learning,\" arXiv preprint arXiv:1509.02971, 2015.", + "[19] P. Henderson, R. Islam, P. Bachman, J. Pineau, D. Precup, and D. Meger, “Deep reinforcement learning that matters,” CoRR, vol. abs/1709.06560, 2017. [Online]. Available: http://arxiv.org/abs/1709.06560", + "[20] Y. Bengio, J. Louradour, R. Collobert, and J. Weston, “Curriculum learning,” in Proceedings of the 26th annual international conference on machine learning, 2009, pp. 41–48." 
+ ], + "bbox": [ + 509, + 66, + 911, + 409 + ], + "page_idx": 7 + } +] \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15714/5ed595a1-6284-4695-8ba1-1bc55e6429ae_model.json b/data/2025/2504_15xxx/2504.15714/5ed595a1-6284-4695-8ba1-1bc55e6429ae_model.json new file mode 100644 index 0000000000000000000000000000000000000000..1eb87656130bafe36c106d277191bf35e98ddee8 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/5ed595a1-6284-4695-8ba1-1bc55e6429ae_model.json @@ -0,0 +1,1690 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.263, + 0.061, + 0.706 + ], + "angle": 270, + "content": "arXiv:2504.15714v1 [cs.RO] 22 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.123, + 0.088, + 0.878, + 0.138 + ], + "angle": 0, + "content": "Autonomous Control of Redundant Hydraulic Manipulator Using Reinforcement Learning with Action Feedback" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.173, + 0.911, + 0.192 + ], + "angle": 0, + "content": "Rohit Dhakate1, Christian Brommer1, Christoph Böhm1, Harald Gietler2, Stephan Weiss1, and Jan Steinbrener1" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.234, + 0.49, + 0.55 + ], + "angle": 0, + "content": "Abstract- This article presents an entirely data-driven approach for autonomous control of redundant manipulators with hydraulic actuation. The approach only requires minimal system information, which is inherited from a simulation model. The non-linear hydraulic actuation dynamics are modeled using actuator networks from the data gathered during the manual operation of the manipulator to effectively emulate the real system in a simulation environment. A neural network control policy for autonomous control, based on end-effector (EE) position tracking is then learned using Reinforcement Learning (RL) with Ornstein-Uhlenbeck process noise (OUNoise) for efficient exploration. 
The RL agent also receives feedback based on supervised learning of the forward kinematics which facilitates selecting the best suitable action from exploration. The control policy directly provides the joint variables as outputs based on provided target EE position while taking into account the system dynamics. The joint variables are then mapped to the hydraulic valve commands, which are then fed to the system without further modifications. The proposed approach is implemented on a scaled hydraulic forwarder crane with three revolute and one prismatic joint to track the desired position of the EE in 3-Dimensional (3D) space. With the emulated dynamics and extensive learning in simulation, the results demonstrate the feasibility of deploying the learned controller directly on the real system." + }, + { + "type": "title", + "bbox": [ + 0.22, + 0.563, + 0.353, + 0.577 + ], + "angle": 0, + "content": "I. INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.586, + 0.49, + 0.706 + ], + "angle": 0, + "content": "Hydraulic cranes are versatile heavy-duty manipulators that are omnipresent in construction, mining, agriculture, or forestry for lifting and transporting heavy objects. Automation by sensor retrofitting of these manipulators tackles not only challenging and dull, dangerous, dirty (DDD) tasks concerning the handling of raw materials but also brings economic benefits by increased productivity, and effortless system upgrades according to the desired functionality." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.707, + 0.49, + 0.798 + ], + "angle": 0, + "content": "With the proposed approach, we are addressing the forest log transportation use case. The manipulator repeatedly performs a monotonous pick-and-place operation to collect and redistribute logs prepared by the harvester. Forwarder cranes mainly remain manually operated, despite continuous widespread automation in the industry. 
Manual operation" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.228, + 0.911, + 0.445 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.456, + 0.914, + 0.482 + ], + "angle": 0, + "content": "Fig. 1. AutoLOG manipulator (1:5 scaled forest forwarder crane): Test-bed for our RL-based controller and manipulation tasks." + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.496, + 0.913, + 0.586 + ], + "angle": 0, + "content": "of such manipulators can be both mentally and physically exhausting, when producing constant, smooth and jerk free motion with joystick, since it requires complex coordination of several hydraulic cylinders [1]. Early automatic and semi-automatic solutions were presented by [2] using analytical methods." + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.587, + 0.914, + 0.798 + ], + "angle": 0, + "content": "The barriers in automation of the forest industry can be traced not only towards complex and dynamic environments but also the complexity and variants of the manipulators depending on the manufacturer. In [1] the authors argue that the automation of the entire forwarding operation is complex as numerous tasks such as log recognition, log grasping point detection/selection and pick-and-place operations are involved. However, the authors conclude that the motion patterns of the manipulator's joints are, as expected, highly repetitive and can be automated using analytical methods. However, for analytical methods, an accurate system and environment model is of utmost necessity to achieve desired results, which could take a lot of effort and time given the complexity and redundant nature of the manipulator." 
+ }, + { + "type": "text", + "bbox": [ + 0.505, + 0.799, + 0.915, + 0.904 + ], + "angle": 0, + "content": "Recent advancements in reinforcement learning not only demonstrated their applications in video-games and simulations but also enabled physical robots to learn complex skills and perform operations in real-world environments. In robot manipulation, reinforcement learning is being extensively used to develop intelligent systems that only require minimal to no system and environment information." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.811, + 0.49, + 0.927 + ], + "angle": 0, + "content": "\\(^{1}\\)Rohit Dhakate, Christian Brommer, Christoph Böhm, Stephan Weiss and Jan Steinbrenner are with the Department of Smart Systems Technologies in the Control of Networked Systems Group, University of Klagenfurt, 9020 Klagenfurt, Austria {rohit.dhakate, christian.brommer, christoph.boehm, stephan.weiss, jan.steinbrenner}@ieee.org \n\\(^{2}\\)Harald Gietler is with the Department of Smart Systems Technologies in the Sensors and Actuators Group, University of Klagenfurt, 9020 Klagenfurt, Austria {harald.gietler}@aau.at \nPre-print version, accepted June/2022, DOI follows ASAP ©IEEE." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.084, + 0.067, + 0.202, + 0.08 + ], + "angle": 0, + "content": "A. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.086, + 0.49, + 0.312 + ], + "angle": 0, + "content": "Autonomous control for forest cranes has been extensively researched for the last two decades. In [3], the authors modeled the system dynamics using differential equations and applied non-linear control laws, and then performed a calibration and control tuning. While [4] also focuses on the aspect of forest crane automation, in addition to compensation for actuator nonlinearities, their main focus is on automating only the base joint (slewing motion). Until recently, all the work done towards automating forest cranes relied on model-based control. 
Current advancements in artificial intelligence (AI) brought substantial simplifications and advantages in tackling complex systems and problems. Within AI, RL algorithms that can be developed in a model-free domain have attracted several researchers and drove the field of automating heavy machinery with the use of AI." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.312, + 0.49, + 0.719 + ], + "angle": 0, + "content": "Several RL algorithms have been proposed to solve dynamic physical models in recent years. Among which model-free algorithms gained keen interest due to their nature of generalizing a solution to a category of a problem. In model-free methods, Q-learning based algorithms such as Deep Q-Network(DQN) [5], Quantile Regression DQN (QR-DQN) [6], learns the action-value function \\( Q(s,a) \\) which is the expected value (cumulative discounted reward) of doing an action \\( a \\) in state \\( s \\) and then following the optimal policy, which is deterministic. Whereas Policy optimization-based algorithms such as Policy gradients, Advantage Actor-Critic (A2C)/ Asynchronous Advantage Actor-Critic (A3C) [7], Proximal Policy Optimization (PPO) [8], and Trust Region Policy Optimization (TRPO) [9], the agent learns directly the policy function that maps state to action. The policy is determined without using a value function. In recent years the application of RL for complex manipulation tasks has been carried out by several researchers. In [10] the authors implemented a TRPO algorithm for automating a hydraulic excavator. The learned control policy is validated by deploying it on the actual excavator. However, they do not control the base joint which limits the motion in 2D. The authors in [11] use PPO for learning a control policy, along with curriculum learning for grasping tasks. An energy optimization goal is also added in the reward function. 
However, the validation of the learned policy is conducted on the same simulation platform on which it had been trained." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.727, + 0.196, + 0.74 + ], + "angle": 0, + "content": "B. Contribution" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.746, + 0.49, + 0.866 + ], + "angle": 0, + "content": "To the best of our knowledge, we present the first work for automation of a real forestry crane with artificial intelligence. Our work investigates the feasibility of applying an actuator-space control policy learned in simulation on a real-world, 4 degrees of freedom, kinematically redundant forestry crane manipulator. The learned control policy maps task-space goals directly to actuator-space commands by providing the target's cartesian position." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.867, + 0.49, + 0.927 + ], + "angle": 0, + "content": "We propose a generalized framework for autonomous control of redundant manipulators with highly non-linear hydraulic actuation. The main contributions of the proposed work are listed below," + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.067, + 0.912, + 0.156 + ], + "angle": 0, + "content": "- Fully data driven approach for position tracking controller of redundant hydraulic manipulator, with minimal system information, negating the need for analytical formulation of forward and inverse kinematics, which is a highly complex task with non-standard manipulators and is subject to change with manipulator models." + }, + { + "type": "text", + "bbox": [ + 0.524, + 0.157, + 0.913, + 0.218 + ], + "angle": 0, + "content": "- Emulated hydraulic actuation dynamics to precisely map from cylinder displacement to joint angles and vice-versa, eliminating the need for formulating the cylinder-joint mapping using geometry." 
+ }, + { + "type": "text", + "bbox": [ + 0.524, + 0.218, + 0.913, + 0.277 + ], + "angle": 0, + "content": "- Improvement on baseline RL controller, with feedback to predicted actions from forward kinematics network using supervised learning, which directly outputs valve commands for the required target EE position." + }, + { + "type": "text", + "bbox": [ + 0.524, + 0.278, + 0.913, + 0.398 + ], + "angle": 0, + "content": "- A Sim-2-Real deployment of simulation learnt control policy onto real manipulator directly without any adaptation. To the best of our knowledge, this is the first time a Sim-2-Real transfer of RL control policy is deployed on a heavy duty manipulator for 3D position tracking in real-world. The controller performs well in tracking circular and helical trajectories both in simulation and real-world experiments." + }, + { + "type": "list", + "bbox": [ + 0.523, + 0.067, + 0.913, + 0.398 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.601, + 0.407, + 0.82, + 0.421 + ], + "angle": 0, + "content": "II. SYSTEM DESCRIPTION" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.426, + 0.915, + 0.624 + ], + "angle": 0, + "content": "The Autonomous Log Ordering through Robotic Grasping (AutoLOG) manipulator, which is a 1:5 scaled-down model of an actual forest forwarder crane, is used as a test-bed for autonomous manipulation tasks (see Fig 1). The manipulator is powered using hydraulic cylinders for its joint motions. With controllable 5 degrees of freedom, the EE can be controlled for its 3D position and yaw angle, making the manipulator redundant in nature. However, for our learning-based control task, we omit the yaw component and only focus on the 3D position of the EE. The yaw parameter of the system is application dependent, such as aligning the yaw with respect to the log orientation for pick-and-place tasks. A description of manipulator configuration is shown in Fig 2." 
+ }, + { + "type": "image", + "bbox": [ + 0.569, + 0.646, + 0.852, + 0.852 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.862, + 0.915, + 0.91 + ], + "angle": 0, + "content": "Fig. 2. Manipulator description: The figure displays the kinematic configuration of the manipulator. The manipulator has 4 revolute and 1 prismatic joint. All the joints in addition to the grapple are actuated using hydraulic cylinders." + } + ], + [ + { + "type": "text", + "bbox": [ + 0.101, + 0.066, + 0.49, + 0.322 + ], + "angle": 0, + "content": "1) Joint Angles / Displacement Measurements: For forest crane actuators, direct access to the inputs and outputs is not always available. Hence we retrofit our manipulator with exteroceptive sensors. The manipulator comprises three revolute and one prismatic joint. Just before the revolute joint 4, we have 2 orthogonal underactuated joints, which cause the gripper to sway freely in a 3D space. The joint states for revolute joints 2, 3, and prismatic joint are obtained by mapping the cylinder displacements to joint angles. We use Waycon SX50 draw-wire sensors to measure the cylinder displacements with a measurement error of \\(0.0002\\mathrm{mm}\\) over a displacement of \\(1250\\mathrm{mm}\\). For revolute joint 1, we use a retrofitted inductance-based angular position sensor which provides absolute angle measurement with a maximum measurement error of 0.8 degrees [12]." + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.323, + 0.49, + 0.399 + ], + "angle": 0, + "content": "2) Electric Control Valves: For autonomous control of the manipulator, the hydraulic proportional valves (electro-hydraulic) are controlled using a Pulse Width Modulation (PWM) control which changes the fluid flow in cylinders according to required joint values." 
+ }, + { + "type": "text", + "bbox": [ + 0.1, + 0.399, + 0.49, + 0.445 + ], + "angle": 0, + "content": "3) Requirements for Approach: Our proposed method requires minimal system information. Table I lists the inputs and outputs of our proposed approach." + }, + { + "type": "list", + "bbox": [ + 0.1, + 0.066, + 0.49, + 0.445 + ], + "angle": 0, + "content": null + }, + { + "type": "table_caption", + "bbox": [ + 0.186, + 0.457, + 0.388, + 0.484 + ], + "angle": 0, + "content": "TABLEI RL AGENT - INPUTS AND OUTPUTS" + }, + { + "type": "table", + "bbox": [ + 0.165, + 0.49, + 0.406, + 0.544 + ], + "angle": 0, + "content": "
ParametersInputsOutput
Joint valuesqtqt+1
Current EE positionXt
Target EE positionXt+1
" + }, + { + "type": "title", + "bbox": [ + 0.156, + 0.573, + 0.416, + 0.587 + ], + "angle": 0, + "content": "III. SIMULATION FRAMEWORK" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.595, + 0.49, + 0.821 + ], + "angle": 0, + "content": "We use CoppeliaSim (formerly V-REP) [13] as our simulation framework to train the RL agent. CoppeliaSim provides a wide range of functionalities and supports multiple physics engines including Bullet [14], ODE [15], Vortex [16] and Newton [17]. The simulation scene is generated using a Computer Aided Design (CAD) model of the manipulator. The scene is dynamically enabled using Bullet 2.78 physics engine to render our simulation. The simulator provides a kinematics calculation module to compute forward and inverse kinematics of the manipulator chain, however we only use the position information of the scene objects (joints and end-effector) for our observations. Observations from the simulator can be considered as measurement from our retrofitted sensors on the real system. To control the manipulator we use a python remote API Client." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.821, + 0.49, + 0.928 + ], + "angle": 0, + "content": "The simulation model is shown in Fig 3. The gripper (red object) is detached for simulations. Thus our simulation setup does not have the cylinder displacements as control inputs. Instead, the joint variables are provided directly to the simulator. However, the resulting joint variables from the learned controller are converted to cylinder displacements using the actuator network." + }, + { + "type": "image", + "bbox": [ + 0.548, + 0.062, + 0.875, + 0.279 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.29, + 0.913, + 0.327 + ], + "angle": 0, + "content": "Fig. 3. Simulation model in CoppeliaSim. The gripper (red object) is detached for training since the effects of gripper sway is out of the scope of the proposed approach." 
+ }, + { + "type": "title", + "bbox": [ + 0.651, + 0.359, + 0.768, + 0.372 + ], + "angle": 0, + "content": "IV. METHODS" + }, + { + "type": "title", + "bbox": [ + 0.507, + 0.388, + 0.597, + 0.401 + ], + "angle": 0, + "content": "A. Overview" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.412, + 0.913, + 0.579 + ], + "angle": 0, + "content": "Fig 4 shows an overview of our approach. We train an actuator and a forward network using supervised learning. The actuator network incorporates the non-linear dynamics involved in the hydraulic actuation and is trained to map cylinder displacement to joint variables and vice-versa. The forward network is a mapping from joint space to operation space of the manipulator. The RL agent (DDPG) is then trained in the simulation to reach a target 3D position from a random initial joint configuration. The trained RL agent is first evaluated for a trajectory tracking task in simulation and then is deployed on the real manipulator for final validation." + }, + { + "type": "image", + "bbox": [ + 0.565, + 0.607, + 0.857, + 0.715 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.676, + 0.716, + 0.742, + 0.726 + ], + "angle": 0, + "content": "Sim-2-Real" + }, + { + "type": "image", + "bbox": [ + 0.565, + 0.727, + 0.855, + 0.835 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.847, + 0.913, + 0.906 + ], + "angle": 0, + "content": "Fig. 4. RL control architecture: The image shows an architecture overview of our proposed approach. The training is done completely offline on a simulation platform, it shows the interaction between forward network, RL agent and the simulation platform. Sim-2-Real transfer of the trained controller is validated by directly deploying it on the physical system." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.084, + 0.067, + 0.24, + 0.081 + ], + "angle": 0, + "content": "B. 
Network Modelling" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.087, + 0.49, + 0.117 + ], + "angle": 0, + "content": "The two supervised learning networks facilitate our approach of learning based control." + }, + { + "type": "text", + "bbox": [ + 0.101, + 0.12, + 0.49, + 0.21 + ], + "angle": 0, + "content": "1) Actuator Network: The actuator network performs a bi-directional mapping between cylinder displacements and joint variables. Our RL agent outputs joint variables for the target goal, whereas low-level manipulator control takes valve commands (cylinder displacements) as control inputs." + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.21, + 0.49, + 0.255 + ], + "angle": 0, + "content": "2) Forward Network: The forward network takes current joint variables as inputs and returns the 3D position of EE." + }, + { + "type": "list", + "bbox": [ + 0.1, + 0.12, + 0.49, + 0.255 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.266, + 0.219, + 0.279 + ], + "angle": 0, + "content": "C. Data Collection" + }, + { + "type": "text", + "bbox": [ + 0.102, + 0.285, + 0.491, + 0.497 + ], + "angle": 0, + "content": "1) Actuator Network Data: For the actuator network, we collected input-output data from the real system during manual operation. We recorded the cylinder displacements using the retrofitted draw-wire sensors, and a motion capture system is used to measure the respective angles, since our system does not have an alternative for direct angle measurement for revolute joints 2 and 3. The cylinder control inputs were provided using a remote control designed for the manipulator. The data is collected with different cylinder velocities to capture the hydraulic actuation dynamics effectively. The collected data is believed to be incorporating all the non-linear dynamics involved in the mapping between cylinders and respective angles, see Fig 5." 
+ }, + { + "type": "image", + "bbox": [ + 0.109, + 0.509, + 0.462, + 0.596 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.109, + 0.603, + 0.459, + 0.692 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.703, + 0.49, + 0.761 + ], + "angle": 0, + "content": "Fig. 5. Real data to train the actuator model is gathered from the physical manipulator. The figure shows the joint angles w.r.t. the cylinder displacement. The cylinder displacements are measured using a draw-wire sensor and corresponding joint angles are recorded using motion capture system." + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.776, + 0.49, + 0.868 + ], + "angle": 0, + "content": "2) Forward Network Data: To train the forward network, we acquired the joint variables and EE position data autonomously by setting a random joint configuration for each data point and recording the EE position using motion capture, as shown in Fig 6. The collected data also gave an insight into the manipulator work-space." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.878, + 0.231, + 0.892 + ], + "angle": 0, + "content": "D. Network Training" + }, + { + "type": "text", + "bbox": [ + 0.101, + 0.897, + 0.49, + 0.927 + ], + "angle": 0, + "content": "1) Actuator Network Training: We train separate networks for each joint-cylinder mapping. Actuator network-2" + }, + { + "type": "image", + "bbox": [ + 0.534, + 0.061, + 0.885, + 0.149 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.533, + 0.153, + 0.885, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.255, + 0.914, + 0.314 + ], + "angle": 0, + "content": "Fig. 6. The image displays the real data recorded for forward kinematics in an autonomous fashion, to train our forward network. 
For each data-point random joint configuration is set and corresponding EE position is recorded using motion capture system. The sampled random configurations covers the full range of cylinder displacements." + }, + { + "type": "text", + "bbox": [ + 0.545, + 0.338, + 0.915, + 0.503 + ], + "angle": 0, + "content": "maps joint2-cylinder2, while actuator network-3 maps joint3-cylinder3. The actuator network-2 is trained using a simple multi-layer perceptron (MLP) with 3 hidden layers (with 256-128-128 hidden units) and non-linear rectified linear unit (ReLU) activation. We used Adam optimizer with a learning rate of 1e-4. The model predicts the cylinder position for a given joint angle. Whereas the actuator network-3 uses an MLP with only 2 hidden layers(with 128-128 hidden units). Fig 7 and 8 shows the validation of trained actuator networks." + }, + { + "type": "image", + "bbox": [ + 0.534, + 0.528, + 0.885, + 0.617 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.539, + 0.625, + 0.885, + 0.711 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.723, + 0.914, + 0.76 + ], + "angle": 0, + "content": "Fig. 7. The figure shows validation results of trained actuator network for joint 2. Excluding error spikes at few instances the network precisely learned the cylinder-joint mapping." + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.79, + 0.914, + 0.913 + ], + "angle": 0, + "content": "2) Forward Network Training: Our forward network is a multi-input-multi-output (MIMO) mapping from joint variables to EE position. The network is trained using an MLP with only 2 hidden layers (with 256-128 hidden units). Despite training the network on only 500 data points, Fig 9 shows that the generalization is very accurate with a maximum prediction error of only (0.0159, 0.0205, 0.0136)m in x, y, and z, respectively." 
+ } + ], + [ + { + "type": "image", + "bbox": [ + 0.11, + 0.06, + 0.465, + 0.15 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.118, + 0.154, + 0.465, + 0.244 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.255, + 0.49, + 0.28 + ], + "angle": 0, + "content": "Fig. 8. Validation results of the trained actuator network for joint 3 are shown in this figure." + }, + { + "type": "image", + "bbox": [ + 0.109, + 0.285, + 0.465, + 0.342 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.11, + 0.344, + 0.464, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.129, + 0.403, + 0.464, + 0.468 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.479, + 0.49, + 0.515 + ], + "angle": 0, + "content": "Fig. 9. Figure shows evaluation of the forward network. With a multi-input structure involving 4 joint variables, the network trained very efficiently to return 3D position of the EE." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.525, + 0.345, + 0.539 + ], + "angle": 0, + "content": "E. Reinforcement Learning Controller" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.544, + 0.49, + 0.634 + ], + "angle": 0, + "content": "Our proposed learning-based controller uses RL to synthesize a model-free task-space position tracking controller. The RL controller learns the inverse kinematics of the manipulator, which cannot be formulated analytically without any optimization objectives due to the redundant nature of the manipulator." 
+ }, + { + "type": "text", + "bbox": [ + 0.1, + 0.635, + 0.364, + 0.649 + ], + "angle": 0, + "content": "Reinforcement Learning Preliminaries:" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.65, + 0.49, + 0.725 + ], + "angle": 0, + "content": "We formalized our RL problem as a Markov decision process (MDP), which is a discrete-time stochastic control process. We use MDP, which provides a mathematical framework for predicting outcomes where the environment is fully observable. The MDP is characterized by," + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.728, + 0.44, + 0.742 + ], + "angle": 0, + "content": "- state \\((s)\\): state of the agent in the environment" + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.743, + 0.475, + 0.758 + ], + "angle": 0, + "content": "- action \\((a)\\): predicted/ conducted action by the agent" + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.759, + 0.488, + 0.787 + ], + "angle": 0, + "content": "- reward \\((r)\\): a scalar valued reward based on performed action and achieved state" + }, + { + "type": "text", + "bbox": [ + 0.1, + 0.789, + 0.488, + 0.818 + ], + "angle": 0, + "content": "- policy \\((\\pi(s|a))\\): decision making function of state-action pair" + }, + { + "type": "list", + "bbox": [ + 0.1, + 0.728, + 0.488, + 0.818 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.821, + 0.445, + 0.836 + ], + "angle": 0, + "content": "A simple actor-critic architecture is shown in Fig 10." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.837, + 0.49, + 0.928 + ], + "angle": 0, + "content": "At a given discrete time step \\(t\\), the state of the system is given by, \\(s_t \\in S\\). The agent makes an observation of the environment \\(o_t \\in O\\). 
Performing an action \\(a_t \\in A\\) according to the policy distribution \\(\\pi(a|s)\\), the agent receives an immediate scalar reward \\(r_t(s_t, a_t)\\) according to the specified reward function \\(R(s, a)\\) providing an updated state \\(s_{t+1}' \\in S\\)." + }, + { + "type": "image", + "bbox": [ + 0.604, + 0.06, + 0.821, + 0.166 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.175, + 0.914, + 0.223 + ], + "angle": 0, + "content": "Fig. 10. A simple architecture of actor-critic method of RL approach is described. It shows the main operation of any RL based algorithm, with state, action, reward, agent and environment being the main components of an RL algorithm." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.232, + 0.913, + 0.337 + ], + "angle": 0, + "content": "The goal of RL algorithms is to find the optimal policy \\(\\pi^{*}(a|s)\\), such that the agent takes the optimal action at any given state in order to maximize the expected return. Here, the deep RL approach involves parameterizing the policy \\(\\pi\\) as a neural network \\(\\pi(\\theta)\\) with parameters \\(\\theta \\in \\Theta\\). The resulting policy approximator outputs a vector of actuator-space control signals at each time step." + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.338, + 0.914, + 0.61 + ], + "angle": 0, + "content": "We use DDPG [18] because it combines both Q-learning and policy optimization approaches. DDPG has an actor-critic architecture, where the critic network determines the Q value, and the actor network determines the actions to be taken. The actor network in DDPG simply uses the negative average Q value generated by the critic model as a loss and learns to generate actions to maximize the Q value in each state. An experience replay buffer stores all the experiences and draws a batch to train the networks. 
To the DDPG baseline, we added feedback to the predicted actions using our forward network for efficient exploration. Using the current policy, we predict a specified number of actions, which is then fed to the forward network to find the best actions based on the norm distance between EE position from predicted actions and the target position. The selected action is then used to perform a \\((s_t, a_t, r_t, s_{t+1}')\\) step to get the next state \\(s_{t+1}'\\). The contents of our system state and actions are described in Table II." + }, + { + "type": "table_caption", + "bbox": [ + 0.612, + 0.619, + 0.808, + 0.646 + ], + "angle": 0, + "content": "TABLE II DDPG ALGORITHM COMPONENTS" + }, + { + "type": "table", + "bbox": [ + 0.534, + 0.657, + 0.884, + 0.726 + ], + "angle": 0, + "content": "
ParametersContentsDimension
StateObservation: Joint variables4x1
Achieved goal: Current EE3x1
Desired goal: Target EE3x1
ActionsJoint Variables: [J1, J2, J3, J4]4x1
" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.742, + 0.914, + 0.862 + ], + "angle": 0, + "content": "We give a constant reward \\( r_t^{step} \\) for each time-step which improves the learning performance. A distance reward \\( r_t^{dist} \\) which helps to learn the reaching task is given based on the norm distance between current and target EE position. We also add a joint limit avoidance reward \\( r_t^{jlim} \\) which discourages the agent from learning infeasible joint configurations. A complete episode reward is the sum of all the aforementioned reward functions." + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.878, + 0.812, + 0.892 + ], + "angle": 0, + "content": "Our reward function is defined as follows," + }, + { + "type": "equation", + "bbox": [ + 0.623, + 0.894, + 0.913, + 0.914 + ], + "angle": 0, + "content": "\\[\nr _ {t} = r _ {\\mathrm {t}} ^ {\\text {s t e p}} + r _ {\\mathrm {t}} ^ {\\text {d i s t}} + r _ {\\mathrm {t}} ^ {\\text {j l i m}} \\tag {1}\n\\]" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.1, + 0.068, + 0.151, + 0.08 + ], + "angle": 0, + "content": "where," + }, + { + "type": "equation", + "bbox": [ + 0.1, + 0.091, + 0.422, + 0.171 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} r _ {t} ^ {s t e p} = 0. 0 0 1 \\\\ r _ {t} ^ {d i s t} = - \\left(\\left\\| x _ {t + 1} - x _ {t} \\right\\| _ {2}\\right) + 0. 0 0 2 \\\\ r _ {t} ^ {j l i m} = \\left\\{ \\begin{array}{l l} - 0. 0 0 0 5, & \\text {i f , j > j _ {m a x} o r j < j _ {m i n}} \\\\ 0, & \\text {i f , j _ {m i n} \\geq j \\leq j _ {m a x}} \\end{array} \\right. \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.175, + 0.49, + 0.31 + ], + "angle": 0, + "content": "We start the simulation with random initial joint configuration, and our RL agent acquires observations from the simulation environment which forms our system state. The actor network then generates random actions based on the current state and exploration noise. 
These actions (joint variables as control inputs) are then filtered using the forward network to select the best action, which is then carried out in a simulation step. The simulation is carried out at \\(100\\mathrm{hz}\\), the same rate our real system is operated." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.31, + 0.49, + 0.432 + ], + "angle": 0, + "content": "The DDPG algorithm is claimed to be sensitive to hyperparameters, which we observed during tuning of the hyperparameters. In [19], it is shown that DDPG with tuned hyperparameters outperforms several other policy optimization algorithms in stable environments. We modified the hyperparameters from the stable baseline parameters to suit our training environment. Table III shows the hyperparameters used for our system." + }, + { + "type": "table_caption", + "bbox": [ + 0.192, + 0.443, + 0.383, + 0.47 + ], + "angle": 0, + "content": "TABLE III ALGORITHM HYPERPARAMETERS" + }, + { + "type": "table", + "bbox": [ + 0.149, + 0.48, + 0.422, + 0.605 + ], + "angle": 0, + "content": "
ParametersVariableValues
Number of episodesn Episodes1500
Number of stepsn Steps1000
Buffer sizenbuffer1e + 06
Batch sizenbatch1024
Discount factorγ0.99
Soft target updateτ1e - 03
Actor learning ratelrac1e - 03
Critic learning ratelrcr1e - 03
OU Noiseσ0.1
" + }, + { + "type": "text", + "bbox": [ + 0.099, + 0.621, + 0.456, + 0.637 + ], + "angle": 0, + "content": "Our complete approach is described in Algorithm 1." + }, + { + "type": "title", + "bbox": [ + 0.235, + 0.645, + 0.338, + 0.659 + ], + "angle": 0, + "content": "V. RESULTS" + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.666, + 0.236, + 0.68 + ], + "angle": 0, + "content": "A. Simulation Results" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.685, + 0.49, + 0.79 + ], + "angle": 0, + "content": "We trained two different policies, Policy 1 (with action feedback) and Policy 2 (without action feedback) for 1500 episodes with randomly sampled targets from the manipulator work-space. All the hyper-parameters and simulation parameters are kept identical, with the feedback to the explored actions being the only distinction between the two policies." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.791, + 0.49, + 0.851 + ], + "angle": 0, + "content": "Fig 11 shows the cumulative reward for both the policies during the training episodes. Policy 1 constantly acquires better rewards than Policy 2 for each episode, validating our approach of efficient exploration using action feedback." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.852, + 0.491, + 0.927 + ], + "angle": 0, + "content": "We validated both policies for tracking a helical trajectory. From the trajectory tracking results shown in Fig 12, it can be seen that the tracking accuracy for policy 1 is better than policy 2. The absolute tracking error is shown in 13. The Root Mean Squared Error (RMSE) for Policy 1 is" + }, + { + "type": "image", + "bbox": [ + 0.534, + 0.06, + 0.887, + 0.245 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.255, + 0.915, + 0.303 + ], + "angle": 0, + "content": "Fig. 11. Episode training rewards for Policy 1 (with feedback) and Policy 2 (without feedback) are shown. 
It is evident from the figure that Policy 1 is exploring efficiently because of the provided feedback. The feedback assists in selecting a meaningful action exploration." + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.311, + 0.915, + 0.402 + ], + "angle": 0, + "content": "[0.017, 0.008, 0.01], whereas for Policy 2 is [0.031, 0.017, 0.02]. Though we trained our RL agent within a defined actual manipulator work-space, we observed that the learned policy generalized the target reaching task and could perform trajectory tracking even outside the work-space on which it is trained." + }, + { + "type": "image", + "bbox": [ + 0.571, + 0.412, + 0.855, + 0.578 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.588, + 0.915, + 0.625 + ], + "angle": 0, + "content": "Fig. 12. In this figure we are comparing the 2 trained policies on a trajectory tracking task in simulation. Policy 1 performs better in tracking the helical trajectory in contrast to Policy 2." + }, + { + "type": "image", + "bbox": [ + 0.532, + 0.648, + 0.888, + 0.833 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.506, + 0.845, + 0.915, + 0.871 + ], + "angle": 0, + "content": "Fig. 13. The image shows the trajectory tracking error for Policy 1 and Policy 2." + }, + { + "type": "title", + "bbox": [ + 0.507, + 0.877, + 0.698, + 0.893 + ], + "angle": 0, + "content": "B. Real World Experiments" + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.897, + 0.915, + 0.927 + ], + "angle": 0, + "content": "We deployed Policy 1 directly on the real manipulator without any modifications to the outputs from the learned" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.192, + 0.06, + 0.382, + 0.087 + ], + "angle": 0, + "content": "TABLE IV TRAJECTORY TRACKING ERRORS" + }, + { + "type": "table", + "bbox": [ + 0.186, + 0.098, + 0.384, + 0.14 + ], + "angle": 0, + "content": "
ExperimentMax. Error (mm)
Simulation27.2, 14.5, 26.3
Real-World75.2, 80.1, 73.1
" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.169, + 0.489, + 0.214 + ], + "angle": 0, + "content": "policy. We validated our learning-based controller approach in real-world experiments by tracking circular and helical trajectories." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.215, + 0.49, + 0.379 + ], + "angle": 0, + "content": "As we trained our control policy by detaching the gripper in simulation, we did not account for the dynamic sway of the gripper when the manipulator is in motion, which is currently out of the scope of our proposed approach. The real-world trajectory tracking experiments shows that the manipulator is successfully tracking the target trajectory, see Fig 14, however, the unmodeled and unaccounted sway induced some tracking errors during the motion see 15. Table IV shows the maximum tracking error for the helical trajectory in simulation and real-world using our learned controller." + }, + { + "type": "image", + "bbox": [ + 0.11, + 0.391, + 0.465, + 0.61 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.62, + 0.49, + 0.668 + ], + "angle": 0, + "content": "Fig. 14. The figure displays the real-world experiment results of trajectory tracking from deployed Policy 1 onto the manipulator. The tracking is performed well given the harsh dynamic conditions of our system. The results validate the Sim-2-Real transfer of our learning-based control approach." + }, + { + "type": "image", + "bbox": [ + 0.11, + 0.675, + 0.465, + 0.858 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.87, + 0.49, + 0.929 + ], + "angle": 0, + "content": "Fig. 15. The image visualizes the error in tracking of helical trajectory for real-world experiments. As foreseen for the same trajectory, the tracking error of the real experiment is bigger compared to simulation tracking error. However, the results are closely comparable. 
The periodic error is caused by the motion in the y-axis which causes the most sway motion of the gripper." + }, + { + "type": "code_caption", + "bbox": [ + 0.516, + 0.068, + 0.816, + 0.081 + ], + "angle": 0, + "content": "Algorithm 1: DDPG with Action Feedback" + }, + { + "type": "algorithm", + "bbox": [ + 0.517, + 0.084, + 0.905, + 0.66 + ], + "angle": 0, + "content": "Initialization: \nRandomly initialize both actor and critic networks, \n\\(\\mu (s|\\theta^{\\mu})\\gets \\theta^{\\mu}\\) \\(Q(s,a|\\theta^{Q})\\gets \\theta^{Q}\\) \nInitialize target networks \\(Q^{\\prime}\\) and \\(\\mu^{\\prime}\\) - \n\\(\\theta^{\\mathcal{Q}^{\\prime}}\\leftarrow \\theta^{\\mathcal{Q}},\\theta^{\\mu^{\\prime}}\\leftarrow \\theta^{\\mu}\\) \nInitialize replay buffer \nTraining: \nfor \\(n = 1,n_{\\text{episodes}}\\) do Reset environment \nReceive initial observation state \\(S\\) \nfor \\(t = 1,n_{\\text{steps}}\\) do for \\(p = 1,n_{\\text{actions}}\\) do \\(a_{p} = \\mu (s_{t}|\\theta^{\\mu}) + \\mathcal{N}_{t}\\) / \\(\\star\\) according to current policy and exploration noise \\(\\star /\\) return \\([a_{p_1},\\dots ,a_{p_n}]\\) BestAction \\((Fk_{net},a_p)\\) : return \\(\\leftarrow a_{p_i},|min(x_{target} - x_{p_i})\\) Set \\(a_t\\gets a_{p_i}\\) Execute action: \\(a_{t}\\) Observe: reward \\(r_t\\) and new state \\(s_{t + 1}\\) Store transition: \\((s_t,a_t,r_t,s_{t + 1})\\) in \\(R\\) Sample random batch: from nbatch transitions \\((s_i,a_i,r_i,s_{t + i})\\) from \\(R\\) Set: \\(y_{i} = r_{i} + \\gamma Q^{\\prime}(s_{i + 1},\\mu^{\\prime}(s_{i + 1}|\\theta^{\\mu^{\\prime}})|\\theta^{Q^{\\prime}})\\) Update critic by minimizing the loss: \\(L = \\frac{1}{N}\\sum_{i}(y_{i} - Q(s_{i},a_{i}|\\theta^{Q}))^{2}\\) Update actor policy using sampled policy gradient: \\(\\nabla_{\\theta^{\\mu}}J\\approx\\) \\(\\frac{1}{N}\\sum_{i}\\nabla_{a}Q(s,a|\\theta^{Q})|_{s=s_{i},a=\\mu(s_{i})}\\nabla_{\\theta^{\\mu}}\\mu(s|\\theta^{\\mu})|_{s_i}\\) Update target networks, 
\\(\\theta^{Q^{\\prime}}\\gets \\tau \\theta^{Q} + (1 - \\tau)\\theta^{Q^{\\prime}}\\) \\(\\theta^{\\mu^{\\prime}}\\gets \\tau \\theta^{\\mu} + (1 - \\tau)\\theta^{\\mu^{\\prime}}\\)" + }, + { + "type": "title", + "bbox": [ + 0.545, + 0.695, + 0.875, + 0.709 + ], + "angle": 0, + "content": "VI. CONCLUSIONS AND FUTURE WORK" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.715, + 0.914, + 0.927 + ], + "angle": 0, + "content": "The presented results demonstrate the direct application of RL to heavy-duty manipulators and the feasibility of directly deploying a control policy entirely learned in simulation to physical forestry cranes. The main advantage of the presented approach is that no mathematical formulation either of kinematics or dynamics is required. For our approach, we do not need the geometry information to acquire the cylinder-joint mapping which is required for the automation of such manipulators. Our approach inherently adapts the actuation dynamics which in general is a complex problem involving numerous external factors. Our controller requires minimal system information which can be easily acquired by retrofitting the manipulator, thus making the automation of such heavy manipulators very efficient and economical. We" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.087, + 0.067, + 0.488, + 0.262 + ], + "angle": 0, + "content": "made use of the available information in a simple and elegant approach to make the controller learning process much more efficient by providing feedback on the exploration actions and choosing the best one among the action candidates. However, our real-world experiment results suffered from tracking errors, mainly due intervening dynamic factors (gripper sway, backlash, actuation inaccuracies) and poorly tuned low-level control. 
In contrast to these intervening factors and given the fact that our controller is trained only on 1500 data points sampled from the complete manipulator trajectory, the tracking accuracy is remarkable. The controller performance can be greatly improved, by training on more data points and providing a finely tuned low-level controller." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.264, + 0.488, + 0.459 + ], + "angle": 0, + "content": "To address the problem of gripper sway, in future work we will extend our framework to integrate the sway motion during the learning process to model a compensating or aggressive control policy. We also plan to incorporate a generalized Long Short Term Memory (LSTM) based backlash model, to also take the backlash motion into account during training. Even though our feedback model facilitates the controller in an efficient exploration and learning, it still contains minor inaccuracies which might be affecting the learning process. A better feedback model will undoubtedly improve the controller performance. For more complex manipulation tasks we plan to use curriculum learning [20], which has been shown to accelerate and improve the learning process." + }, + { + "type": "title", + "bbox": [ + 0.24, + 0.472, + 0.334, + 0.484 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.493, + 0.49, + 0.54 + ], + "angle": 0, + "content": "[1] P. La Hera and D. O. Morales, “What do we observe when we equip a forestry crane with motion sensors?” Croatian Journal of Forest Engineering: Journal for Theory and Application of Forestry Engineering, vol. 40, no. 2, pp. 259–280, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.54, + 0.49, + 0.596 + ], + "angle": 0, + "content": "[2] P. L. Hera, U. Mettin, I. R. Manchester, and A. Shiriaev, \"Identification and control of a hydraulic forestry crane,\" IFAC Proceedings Volumes, vol. 41, no. 2, pp. 2306-2311, 2008, 17th IFAC World Congress. 
[Online]. Available: https://www.sciencedirect.com/science/article/pii/S1474667016392941" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.596, + 0.49, + 0.63 + ], + "angle": 0, + "content": "[3] P. La Hera and D. Ortiz Morales, \"Model-based development of control systems for forestry cranes,\" Journal of Control Science and Engineering, vol. 2015, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.63, + 0.488, + 0.664 + ], + "angle": 0, + "content": "[4] S. Fodor, C. Vázquez, and L. Freidovich, \"Automation of slewing motions for forestry cranes,\" in 2015 15th International Conference on Control, Automation and Systems (ICCAS), 2015, pp. 796-801." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.664, + 0.488, + 0.744 + ], + "angle": 0, + "content": "[5] V. Mnih, K. Kavukcuoglu, D. Silver, A. A. Rusu, J. Veness, M. G. Bellemare, A. Graves, M. Riedmiller, A. K. Fidjeland, G. Ostrovski, S. Petersen, C. Beattie, A. Sadik, I. Antonoglou, H. King, D. Kumaran, D. Wierstra, S. Legg, and D. Hassabis, \"Human-level control through deep reinforcement learning,\" Nature, vol. 518, no. 7540, pp. 529-533, Feb 2015. [Online]. Available: https://doi.org/10.1038/nature14236" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.744, + 0.488, + 0.778 + ], + "angle": 0, + "content": "[6] W. Dabney, M. Rowland, M. Bellemare, and R. Munos, \"Distributional reinforcement learning with quantile regression,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 32, no. 1, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.778, + 0.488, + 0.823 + ], + "angle": 0, + "content": "[7] V. Mnih, A. P. Badia, M. Mirza, A. Graves, T. Lillicrap, T. Harley, D. Silver, and K. Kavukcuoglu, \"Asynchronous methods for deep reinforcement learning,\" in International conference on machine learning. PMLR, 2016, pp. 1928-1937." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.823, + 0.488, + 0.857 + ], + "angle": 0, + "content": "[8] J. 
Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov, “Proximal policy optimization algorithms,” arXiv preprint arXiv:1707.06347, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.857, + 0.488, + 0.927 + ], + "angle": 0, + "content": "[9] J. Schulman, S. Levine, P. Abbeel, M. Jordan, and P. Moritz, \"Trust region policy optimization,\" in Proceedings of the 32nd International Conference on Machine Learning, ser. Proceedings of Machine Learning Research, F. Bach and D. Blei, Eds., vol. 37. Lille, France: PMLR, 07-09 Jul 2015, pp. 1889-1897. [Online]. Available: https://proceedings.mlr.press/v37/schulman15.html" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.493, + 0.49, + 0.927 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.068, + 0.912, + 0.103 + ], + "angle": 0, + "content": "[10] P. Egli and M. Hutter, \"Towards rl-based hydraulic excavator automation,\" in 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2020, pp. 2692-2697." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.103, + 0.912, + 0.149 + ], + "angle": 0, + "content": "[11] J. Andersson, K. Bodin, D. Lindmark, M. Servin, and E. Wallin, \"Reinforcement learning control of a forestry crane manipulator,\" in 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2021, pp. 2121-2126." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.149, + 0.912, + 0.182 + ], + "angle": 0, + "content": "[12] H. Gietler, C. Stetco, and H. Zangl, \"Scalable retrofit angular position sensor system,\" in 2020 IEEE International Instrumentation and Measurement Technology Conference (I2MTC). IEEE, 2020, pp. 1-6." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.182, + 0.912, + 0.227 + ], + "angle": 0, + "content": "[13] E. Rohmer, S. P. Singh, and M. 
Freese, \"V-rep: A versatile and scalable robot simulation framework,\" in 2013 IEEE/RSJ International Conference on Intelligent Robots and Systems. IEEE, 2013, pp. 1321-1326." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.227, + 0.912, + 0.251 + ], + "angle": 0, + "content": "[14] E. Coumans et al., “Bullet real-time physics simulation,” URL http://bulletphysics.org, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.251, + 0.799, + 0.261 + ], + "angle": 0, + "content": "[15] R. Smith et al., \"Open dynamics engine,\" 2007." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.261, + 0.774, + 0.272 + ], + "angle": 0, + "content": "[16] CM-Labs, \"Vortex studio,\" CM Labs, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.272, + 0.912, + 0.295 + ], + "angle": 0, + "content": "[17] J. Jerez and A. Suero, “Newton game dynamics,” Open Source Physics Engine, 2008." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.295, + 0.912, + 0.328 + ], + "angle": 0, + "content": "[18] T. P. Lillicrap, J. J. Hunt, A. Pritzel, N. Heess, T. Erez, Y. Tassa, D. Silver, and D. Wierstra, \"Continuous control with deep reinforcement learning,\" arXiv preprint arXiv:1509.02971, 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.328, + 0.912, + 0.374 + ], + "angle": 0, + "content": "[19] P. Henderson, R. Islam, P. Bachman, J. Pineau, D. Precup, and D. Meger, “Deep reinforcement learning that matters,” CoRR, vol. abs/1709.06560, 2017. [Online]. Available: http://arxiv.org/abs/1709.06560" + }, + { + "type": "ref_text", + "bbox": [ + 0.51, + 0.374, + 0.912, + 0.41 + ], + "angle": 0, + "content": "[20] Y. Bengio, J. Louradour, R. Collobert, and J. Weston, “Curriculum learning,” in Proceedings of the 26th annual international conference on machine learning, 2009, pp. 41–48." 
+ }, + { + "type": "list", + "bbox": [ + 0.51, + 0.068, + 0.912, + 0.41 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15714/5ed595a1-6284-4695-8ba1-1bc55e6429ae_origin.pdf b/data/2025/2504_15xxx/2504.15714/5ed595a1-6284-4695-8ba1-1bc55e6429ae_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..7a651b2f16045ee358a9513c18f731a977425de0 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/5ed595a1-6284-4695-8ba1-1bc55e6429ae_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:719a3561f72f6ea9e0f0264e6509c8b04b5ef0230b34512293a3d636c9a42a92 +size 5336054 diff --git a/data/2025/2504_15xxx/2504.15714/full.md b/data/2025/2504_15xxx/2504.15714/full.md new file mode 100644 index 0000000000000000000000000000000000000000..64f1e69b9516dd47b3e31ebb82cf54f01e063b47 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/full.md @@ -0,0 +1,260 @@ +# Autonomous Control of Redundant Hydraulic Manipulator Using Reinforcement Learning with Action Feedback + +Rohit Dhakate1, Christian Brommer1, Christoph Böhm1, Harald Gietler2, Stephan Weiss1, and Jan Steinbrener1 + +Abstract- This article presents an entirely data-driven approach for autonomous control of redundant manipulators with hydraulic actuation. The approach only requires minimal system information, which is inherited from a simulation model. The non-linear hydraulic actuation dynamics are modeled using actuator networks from the data gathered during the manual operation of the manipulator to effectively emulate the real system in a simulation environment. A neural network control policy for autonomous control, based on end-effector (EE) position tracking is then learned using Reinforcement Learning (RL) with Ornstein-Uhlenbeck process noise (OUNoise) for efficient exploration. 
The RL agent also receives feedback based on supervised learning of the forward kinematics which facilitates selecting the best suitable action from exploration. The control policy directly provides the joint variables as outputs based on provided target EE position while taking into account the system dynamics. The joint variables are then mapped to the hydraulic valve commands, which are then fed to the system without further modifications. The proposed approach is implemented on a scaled hydraulic forwarder crane with three revolute and one prismatic joint to track the desired position of the EE in 3-Dimensional (3D) space. With the emulated dynamics and extensive learning in simulation, the results demonstrate the feasibility of deploying the learned controller directly on the real system. + +# I. INTRODUCTION + +Hydraulic cranes are versatile heavy-duty manipulators that are omnipresent in construction, mining, agriculture, or forestry for lifting and transporting heavy objects. Automation by sensor retrofitting of these manipulators tackles not only challenging and dull, dangerous, dirty (DDD) tasks concerning the handling of raw materials but also brings economic benefits by increased productivity, and effortless system upgrades according to the desired functionality. + +With the proposed approach, we are addressing the forest log transportation use case. The manipulator repeatedly performs a monotonous pick-and-place operation to collect and redistribute logs prepared by the harvester. Forwarder cranes mainly remain manually operated, despite continuous widespread automation in the industry. Manual operation + +![](images/50a129a1473f93925858127e715013f22fb426c9ed9b317a9fba1f66608fdc8e.jpg) +Fig. 1. AutoLOG manipulator (1:5 scaled forest forwarder crane): Test-bed for our RL-based controller and manipulation tasks. 
+ +of such manipulators can be both mentally and physically exhausting, when producing constant, smooth and jerk free motion with joystick, since it requires complex coordination of several hydraulic cylinders [1]. Early automatic and semi-automatic solutions were presented by [2] using analytical methods. + +The barriers in automation of the forest industry can be traced not only towards complex and dynamic environments but also the complexity and variants of the manipulators depending on the manufacturer. In [1] the authors argue that the automation of the entire forwarding operation is complex as numerous tasks such as log recognition, log grasping point detection/selection and pick-and-place operations are involved. However, the authors conclude that the motion patterns of the manipulator's joints are, as expected, highly repetitive and can be automated using analytical methods. However, for analytical methods, an accurate system and environment model is of utmost necessity to achieve desired results, which could take a lot of effort and time given the complexity and redundant nature of the manipulator. + +Recent advancements in reinforcement learning not only demonstrated their applications in video-games and simulations but also enabled physical robots to learn complex skills and perform operations in real-world environments. In robot manipulation, reinforcement learning is being extensively used to develop intelligent systems that only require minimal to no system and environment information. 
+ +$^{1}$ Rohit Dhakate, Christian Brommer, Christoph Böhm, Stephan Weiss and Jan Steinbrenner are with the Department of Smart Systems Technologies in the Control of Networked Systems Group, University of Klagenfurt, 9020 Klagenfurt, Austria {rohit.dhakate, christian.brommer, christoph.boehm, stephan.weiss, jan.steinbrenner}@ieee.org + $^{2}$ Harald Gietler is with the Department of Smart Systems Technologies in the Sensors and Actuators Group, University of Klagenfurt, 9020 Klagenfurt, Austria {harald.gietler}@aau.at +Pre-print version, accepted June/2022, DOI follows ASAP ©IEEE. + +# A. Related Work + +Autonomous control for forest cranes has been extensively researched for the last two decades. In [3], the authors modeled the system dynamics using differential equations and applied non-linear control laws, and then performed a calibration and control tuning. While [4] also focuses on the aspect of forest crane automation, in addition to compensation for actuator nonlinearities, their main focus is on automating only the base joint (slewing motion). Until recently, all the work done towards automating forest cranes relied on model-based control. Current advancements in artificial intelligence (AI) brought substantial simplifications and advantages in tackling complex systems and problems. Within AI, RL algorithms that can be developed in a model-free domain have attracted several researchers and drove the field of automating heavy machinery with the use of AI. + +Several RL algorithms have been proposed to solve dynamic physical models in recent years. Among which model-free algorithms gained keen interest due to their nature of generalizing a solution to a category of a problem. 
In model-free methods, Q-learning based algorithms such as Deep Q-Network(DQN) [5], Quantile Regression DQN (QR-DQN) [6], learns the action-value function $Q(s,a)$ which is the expected value (cumulative discounted reward) of doing an action $a$ in state $s$ and then following the optimal policy, which is deterministic. Whereas Policy optimization-based algorithms such as Policy gradients, Advantage Actor-Critic (A2C)/ Asynchronous Advantage Actor-Critic (A3C) [7], Proximal Policy Optimization (PPO) [8], and Trust Region Policy Optimization (TRPO) [9], the agent learns directly the policy function that maps state to action. The policy is determined without using a value function. In recent years the application of RL for complex manipulation tasks has been carried out by several researchers. In [10] the authors implemented a TRPO algorithm for automating a hydraulic excavator. The learned control policy is validated by deploying it on the actual excavator. However, they do not control the base joint which limits the motion in 2D. The authors in [11] use PPO for learning a control policy, along with curriculum learning for grasping tasks. An energy optimization goal is also added in the reward function. However, the validation of the learned policy is conducted on the same simulation platform on which it had been trained. + +# B. Contribution + +To the best of our knowledge, we present the first work for automation of a real forestry crane with artificial intelligence. Our work investigates the feasibility of applying an actuator-space control policy learned in simulation on a real-world, 4 degrees of freedom, kinematically redundant forestry crane manipulator. The learned control policy maps task-space goals directly to actuator-space commands by providing the target's cartesian position. + +We propose a generalized framework for autonomous control of redundant manipulators with highly non-linear hydraulic actuation. 
The main contributions of the proposed work are listed below, + +- Fully data driven approach for position tracking controller of redundant hydraulic manipulator, with minimal system information, negating the need for analytical formulation of forward and inverse kinematics, which is a highly complex task with non-standard manipulators and is subject to change with manipulator models. +- Emulated hydraulic actuation dynamics to precisely map from cylinder displacement to joint angles and vice-versa, eliminating the need for formulating the cylinder-joint mapping using geometry. +- Improvement on baseline RL controller, with feedback to predicted actions from forward kinematics network using supervised learning, which directly outputs valve commands for the required target EE position. +- A Sim-2-Real deployment of simulation learnt control policy onto real manipulator directly without any adaptation. To the best of our knowledge, this is the first time a Sim-2-Real transfer of RL control policy is deployed on a heavy duty manipulator for 3D position tracking in real-world. The controller performs well in tracking circular and helical trajectories both in simulation and real-world experiments. + +# II. SYSTEM DESCRIPTION + +The Autonomous Log Ordering through Robotic Grasping (AutoLOG) manipulator, which is a 1:5 scaled-down model of an actual forest forwarder crane, is used as a test-bed for autonomous manipulation tasks (see Fig 1). The manipulator is powered using hydraulic cylinders for its joint motions. With controllable 5 degrees of freedom, the EE can be controlled for its 3D position and yaw angle, making the manipulator redundant in nature. However, for our learning-based control task, we omit the yaw component and only focus on the 3D position of the EE. The yaw parameter of the system is application dependent, such as aligning the yaw with respect to the log orientation for pick-and-place tasks. 
A description of manipulator configuration is shown in Fig 2. + +![](images/63c3b1d87892ad4296b482afa68b8dcc1c2a1f5e2da7c3c4954380f200625a47.jpg) +Fig. 2. Manipulator description: The figure displays the kinematic configuration of the manipulator. The manipulator has 4 revolute and 1 prismatic joint. All the joints in addition to the grapple are actuated using hydraulic cylinders. + +1) Joint Angles / Displacement Measurements: For forest crane actuators, direct access to the inputs and outputs is not always available. Hence we retrofit our manipulator with exteroceptive sensors. The manipulator comprises three revolute and one prismatic joint. Just before the revolute joint 4, we have 2 orthogonal underactuated joints, which cause the gripper to sway freely in a 3D space. The joint states for revolute joints 2, 3, and prismatic joint are obtained by mapping the cylinder displacements to joint angles. We use Waycon SX50 draw-wire sensors to measure the cylinder displacements with a measurement error of $0.0002\mathrm{mm}$ over a displacement of $1250\mathrm{mm}$ . For revolute joint 1, we use a retrofitted inductance-based angular position sensor which provides absolute angle measurement with a maximum measurement error of 0.8 degrees [12]. +2) Electric Control Valves: For autonomous control of the manipulator, the hydraulic proportional valves (electro-hydraulic) are controlled using a Pulse Width Modulation (PWM) control which changes the fluid flow in cylinders according to required joint values. +3) Requirements for Approach: Our proposed method requires minimal system information. Table I lists the inputs and outputs of our proposed approach. + +TABLEI RL AGENT - INPUTS AND OUTPUTS + +
| Parameters | Inputs | Output |
| --- | --- | --- |
| Joint values | $q_t$ | $q_{t+1}$ |
| Current EE position | $X_t$ | |
| Target EE position | $X_{t+1}$ | |
+ +# III. SIMULATION FRAMEWORK + +We use CoppeliaSim (formerly V-REP) [13] as our simulation framework to train the RL agent. CoppeliaSim provides a wide range of functionalities and supports multiple physics engines including Bullet [14], ODE [15], Vortex [16] and Newton [17]. The simulation scene is generated using a Computer Aided Design (CAD) model of the manipulator. The scene is dynamically enabled using Bullet 2.78 physics engine to render our simulation. The simulator provides a kinematics calculation module to compute forward and inverse kinematics of the manipulator chain, however we only use the position information of the scene objects (joints and end-effector) for our observations. Observations from the simulator can be considered as measurement from our retrofitted sensors on the real system. To control the manipulator we use a python remote API Client. + +The simulation model is shown in Fig 3. The gripper (red object) is detached for simulations. Thus our simulation setup does not have the cylinder displacements as control inputs. Instead, the joint variables are provided directly to the simulator. However, the resulting joint variables from the learned controller are converted to cylinder displacements using the actuator network. + +![](images/1918b898feb71111018760b82ae484f8dc74128bde4d2b4ff142827a7d7a3e3b.jpg) +Fig. 3. Simulation model in CoppeliaSim. The gripper (red object) is detached for training since the effects of gripper sway is out of the scope of the proposed approach. + +# IV. METHODS + +# A. Overview + +Fig 4 shows an overview of our approach. We train an actuator and a forward network using supervised learning. The actuator network incorporates the non-linear dynamics involved in the hydraulic actuation and is trained to map cylinder displacement to joint variables and vice-versa. The forward network is a mapping from joint space to operation space of the manipulator. 
The RL agent (DDPG) is then trained in the simulation to reach a target 3D position from a random initial joint configuration. The trained RL agent is first evaluated for a trajectory tracking task in simulation and then is deployed on the real manipulator for final validation. + +![](images/24de990de12ca065df1ad0249823ff8cc13ebce5db9034346c97011e859835da.jpg) +Sim-2-Real + +![](images/a592265274bdaf56173f88c5d5586737e10ee36a9263ef57f9b9fcd2cf99bb42.jpg) +Fig. 4. RL control architecture: The image shows an architecture overview of our proposed approach. The training is done completely offline on a simulation platform, it shows the interaction between forward network, RL agent and the simulation platform. Sim-2-Real transfer of the trained controller is validated by directly deploying it on the physical system. + +# B. Network Modelling + +The two supervised learning networks facilitate our approach of learning based control. + +1) Actuator Network: The actuator network performs a bi-directional mapping between cylinder displacements and joint variables. Our RL agent outputs joint variables for the target goal, whereas low-level manipulator control takes valve commands (cylinder displacements) as control inputs. +2) Forward Network: The forward network takes current joint variables as inputs and returns the 3D position of EE. + +# C. Data Collection + +1) Actuator Network Data: For the actuator network, we collected input-output data from the real system during manual operation. We recorded the cylinder displacements using the retrofitted draw-wire sensors, and a motion capture system is used to measure the respective angles, since our system does not have an alternative for direct angle measurement for revolute joints 2 and 3. The cylinder control inputs were provided using a remote control designed for the manipulator. The data is collected with different cylinder velocities to capture the hydraulic actuation dynamics effectively. 
The collected data is believed to be incorporating all the non-linear dynamics involved in the mapping between cylinders and respective angles, see Fig 5. + +![](images/bc475e9736badced79ff37cd0d7b6f2c30391f27b24b10ea198d63521742775c.jpg) + +![](images/a8a1f8bf9611d114565d29e224f0c989988432c99613cb7176c03ee1f699d59d.jpg) +Fig. 5. Real data to train the actuator model is gathered from the physical manipulator. The figure shows the joint angles w.r.t. the cylinder displacement. The cylinder displacements are measured using a draw-wire sensor and corresponding joint angles are recorded using motion capture system. + +2) Forward Network Data: To train the forward network, we acquired the joint variables and EE position data autonomously by setting a random joint configuration for each data point and recording the EE position using motion capture, as shown in Fig 6. The collected data also gave an insight into the manipulator work-space. + +# D. Network Training + +1) Actuator Network Training: We train separate networks for each joint-cylinder mapping. Actuator network-2 + +![](images/0ff172a1e9706049a3ab566b4fa2dff61b7fbc3e448f9a68de3e2c9da7881ed9.jpg) + +![](images/dc245afab5ffe8c91cbbb34839c3c74d38ee032740331754c65ec1f3448037f5.jpg) +Fig. 6. The image displays the real data recorded for forward kinematics in an autonomous fashion, to train our forward network. For each data-point random joint configuration is set and corresponding EE position is recorded using motion capture system. The sampled random configurations covers the full range of cylinder displacements. + +maps joint2-cylinder2, while actuator network-3 maps joint3-cylinder3. The actuator network-2 is trained using a simple multi-layer perceptron (MLP) with 3 hidden layers (with 256-128-128 hidden units) and non-linear rectified linear unit (ReLU) activation. We used Adam optimizer with a learning rate of 1e-4. The model predicts the cylinder position for a given joint angle. 
Whereas the actuator network-3 uses an MLP with only 2 hidden layers(with 128-128 hidden units). Fig 7 and 8 shows the validation of trained actuator networks. + +![](images/89769b68188785d41523fe5e36a853662b531cf8c3afc7b6b5218adb3a2c22b3.jpg) + +![](images/959b617bf95a2fc023392858e28016f6decfa4ef0d154fc7e850bb39d842b26d.jpg) +Fig. 7. The figure shows validation results of trained actuator network for joint 2. Excluding error spikes at few instances the network precisely learned the cylinder-joint mapping. + +2) Forward Network Training: Our forward network is a multi-input-multi-output (MIMO) mapping from joint variables to EE position. The network is trained using an MLP with only 2 hidden layers (with 256-128 hidden units). Despite training the network on only 500 data points, Fig 9 shows that the generalization is very accurate with a maximum prediction error of only (0.0159, 0.0205, 0.0136)m in x, y, and z, respectively. + +![](images/be285313dbfb81aa70b092241b850e1b5bb5c496a3fcb32cd71290df131f0e58.jpg) + +![](images/c735e05f9610377714b0e6df852a1b7937f1d8bbd2000a99a2aaf4e48ae95ded.jpg) +Fig. 8. Validation results of the trained actuator network for joint 3 are shown in this figure. + +![](images/19833e38b857d9913051ebc6cc3f4945e35d8a0150770467823b8209669f8666.jpg) + +![](images/bd54f262f80aa7631a79e5f18caf190a7f560cf3957649dcad75541910accba9.jpg) + +![](images/0de451a910d927eb07699ac09f4a7436643b76c64c8749231ddd37eb061202c5.jpg) +Fig. 9. Figure shows evaluation of the forward network. With a multi-input structure involving 4 joint variables, the network trained very efficiently to return 3D position of the EE. + +# E. Reinforcement Learning Controller + +Our proposed learning-based controller uses RL to synthesize a model-free task-space position tracking controller. The RL controller learns the inverse kinematics of the manipulator, which cannot be formulated analytically without any optimization objectives due to the redundant nature of the manipulator. 
+ +Reinforcement Learning Preliminaries: + +We formalized our RL problem as a Markov decision process (MDP), which is a discrete-time stochastic control process. We use MDP, which provides a mathematical framework for predicting outcomes where the environment is fully observable. The MDP is characterized by, + +- state $(s)$ : state of the agent in the environment +- action $(a)$ : predicted/ conducted action by the agent +- reward $(r)$ : a scalar valued reward based on performed action and achieved state +- policy $(\pi(s|a))$ : decision making function of state-action pair + +A simple actor-critic architecture is shown in Fig 10. + +At a given discrete time step $t$ , the state of the system is given by, $s_t \in S$ . The agent makes an observation of the environment $o_t \in O$ . Performing an action $a_t \in A$ according to the policy distribution $\pi(a|s)$ , the agent receives an immediate scalar reward $r_t(s_t, a_t)$ according to the specified reward function $R(s, a)$ providing an updated state $s_{t+1}' \in S$ . + +![](images/200646669cbdd5936b36353cd83b3d28e8d93a92f7ee0ed87039448f6357d32d.jpg) +Fig. 10. A simple architecture of actor-critic method of RL approach is described. It shows the main operation of any RL based algorithm, with state, action, reward, agent and environment being the main components of an RL algorithm. + +The goal of RL algorithms is to find the optimal policy $\pi^{*}(a|s)$ , such that the agent takes the optimal action at any given state in order to maximize the expected return. Here, the deep RL approach involves parameterizing the policy $\pi$ as a neural network $\pi(\theta)$ with parameters $\theta \in \Theta$ . The resulting policy approximator outputs a vector of actuator-space control signals at each time step. + +We use DDPG [18] because it combines both Q-learning and policy optimization approaches. 
DDPG has an actor-critic architecture, where the critic network determines the Q value, and the actor network determines the actions to be taken. The actor network in DDPG simply uses the negative average Q value generated by the critic model as a loss and learns to generate actions to maximize the Q value in each state. An experience replay buffer stores all the experiences and draws a batch to train the networks. To the DDPG baseline, we added feedback to the predicted actions using our forward network for efficient exploration. Using the current policy, we predict a specified number of actions, which is then fed to the forward network to find the best actions based on the norm distance between EE position from predicted actions and the target position. The selected action is then used to perform a $(s_t, a_t, r_t, s_{t+1}')$ step to get the next state $s_{t+1}'$ . The contents of our system state and actions are described in Table II. + +TABLE II DDPG ALGORITHM COMPONENTS + +
| Parameters | Contents | Dimension |
| --- | --- | --- |
| State | Observation: Joint variables | 4x1 |
| | Achieved goal: Current EE | 3x1 |
| | Desired goal: Target EE | 3x1 |
| Actions | Joint variables: [J1, J2, J3, J4] | 4x1 |
+ +We give a constant reward $r_t^{step}$ for each time-step which improves the learning performance. A distance reward $r_t^{dist}$ which helps to learn the reaching task is given based on the norm distance between current and target EE position. We also add a joint limit avoidance reward $r_t^{jlim}$ which discourages the agent from learning infeasible joint configurations. A complete episode reward is the sum of all the aforementioned reward functions. + +Our reward function is defined as follows, + +$$ +r _ {t} = r _ {\mathrm {t}} ^ {\text {s t e p}} + r _ {\mathrm {t}} ^ {\text {d i s t}} + r _ {\mathrm {t}} ^ {\text {j l i m}} \tag {1} +$$ + +where, + +$$ +\begin{array}{l} r _ {t} ^ {s t e p} = 0. 0 0 1 \\ r _ {t} ^ {d i s t} = - \left(\left\| x _ {t + 1} - x _ {t} \right\| _ {2}\right) + 0. 0 0 2 \\ r _ {t} ^ {j l i m} = \left\{ \begin{array}{l l} - 0. 0 0 0 5, & \text {i f , j > j _ {m a x} o r j < j _ {m i n}} \\ 0, & \text {i f , j _ {m i n} \geq j \leq j _ {m a x}} \end{array} \right. \\ \end{array} +$$ + +We start the simulation with random initial joint configuration, and our RL agent acquires observations from the simulation environment which forms our system state. The actor network then generates random actions based on the current state and exploration noise. These actions (joint variables as control inputs) are then filtered using the forward network to select the best action, which is then carried out in a simulation step. The simulation is carried out at $100\mathrm{hz}$ , the same rate our real system is operated. + +The DDPG algorithm is claimed to be sensitive to hyperparameters, which we observed during tuning of the hyperparameters. In [19], it is shown that DDPG with tuned hyperparameters outperforms several other policy optimization algorithms in stable environments. We modified the hyperparameters from the stable baseline parameters to suit our training environment. Table III shows the hyperparameters used for our system. 
+ +TABLE III ALGORITHM HYPERPARAMETERS + +
| Parameters | Variable | Values |
| --- | --- | --- |
| Number of episodes | $n_{episodes}$ | 1500 |
| Number of steps | $n_{steps}$ | 1000 |
| Buffer size | $n_{buffer}$ | 1e+06 |
| Batch size | $n_{batch}$ | 1024 |
| Discount factor | $\gamma$ | 0.99 |
| Soft target update | $\tau$ | 1e-03 |
| Actor learning rate | $lr_{ac}$ | 1e-03 |
| Critic learning rate | $lr_{cr}$ | 1e-03 |
| OU Noise | $\sigma$ | 0.1 |
+ +Our complete approach is described in Algorithm 1. + +# V. RESULTS + +# A. Simulation Results + +We trained two different policies, Policy 1 (with action feedback) and Policy 2 (without action feedback) for 1500 episodes with randomly sampled targets from the manipulator work-space. All the hyper-parameters and simulation parameters are kept identical, with the feedback to the explored actions being the only distinction between the two policies. + +Fig 11 shows the cumulative reward for both the policies during the training episodes. Policy 1 constantly acquires better rewards than Policy 2 for each episode, validating our approach of efficient exploration using action feedback. + +We validated both policies for tracking a helical trajectory. From the trajectory tracking results shown in Fig 12, it can be seen that the tracking accuracy for policy 1 is better than policy 2. The absolute tracking error is shown in 13. The Root Mean Squared Error (RMSE) for Policy 1 is + +![](images/eae163ea51a695c45611d0e685ac65e614c7797a6b91db3920d1340686ed9b34.jpg) +Fig. 11. Episode training rewards for Policy 1 (with feedback) and Policy 2 (without feedback) are shown. It is evident from the figure that Policy 1 is exploring efficiently because of the provided feedback. The feedback assists in selecting a meaningful action exploration. + +[0.017, 0.008, 0.01], whereas for Policy 2 is [0.031, 0.017, 0.02]. Though we trained our RL agent within a defined actual manipulator work-space, we observed that the learned policy generalized the target reaching task and could perform trajectory tracking even outside the work-space on which it is trained. + +![](images/1766108b659a76a1a9c6a84cf6d74f5cb68b2a8323ec8343f166cc285ac067f1.jpg) +Fig. 12. In this figure we are comparing the 2 trained policies on a trajectory tracking task in simulation. Policy 1 performs better in tracking the helical trajectory in contrast to Policy 2. 
+ +![](images/282b6dce16d75d3572a07c67522f78d93b534fc7ee04ddcb62da68ef11c01a0d.jpg) +Fig. 13. The image shows the trajectory tracking error for Policy 1 and Policy 2. + +# B. Real World Experiments + +We deployed Policy 1 directly on the real manipulator without any modifications to the outputs from the learned + +TABLE IV TRAJECTORY TRACKING ERRORS + +
| Experiment | Max. Error (mm) |
| --- | --- |
| Simulation | 27.2, 14.5, 26.3 |
| Real-World | 75.2, 80.1, 73.1 |
+ +policy. We validated our learning-based controller approach in real-world experiments by tracking circular and helical trajectories. + +As we trained our control policy by detaching the gripper in simulation, we did not account for the dynamic sway of the gripper when the manipulator is in motion, which is currently out of the scope of our proposed approach. The real-world trajectory tracking experiments shows that the manipulator is successfully tracking the target trajectory, see Fig 14, however, the unmodeled and unaccounted sway induced some tracking errors during the motion see 15. Table IV shows the maximum tracking error for the helical trajectory in simulation and real-world using our learned controller. + +![](images/2f78940a880e512f37387b0be3e6118bcf2d143d524f78be3a4bc1f6b80a1417.jpg) +Fig. 14. The figure displays the real-world experiment results of trajectory tracking from deployed Policy 1 onto the manipulator. The tracking is performed well given the harsh dynamic conditions of our system. The results validate the Sim-2-Real transfer of our learning-based control approach. + +![](images/a5ae58c57153f6df9ae9bd5a86843a5a760c1b857b6f492a818443fb6613131c.jpg) +Fig. 15. The image visualizes the error in tracking of helical trajectory for real-world experiments. As foreseen for the same trajectory, the tracking error of the real experiment is bigger compared to simulation tracking error. However, the results are closely comparable. The periodic error is caused by the motion in the y-axis which causes the most sway motion of the gripper. 
+ +Algorithm 1: DDPG with Action Feedback +Initialization: +Randomly initialize both actor and critic networks, + $\mu (s|\theta^{\mu})\gets \theta^{\mu}$ $Q(s,a|\theta^{Q})\gets \theta^{Q}$ +Initialize target networks $Q^{\prime}$ and $\mu^{\prime}$ - + $\theta^{\mathcal{Q}^{\prime}}\leftarrow \theta^{\mathcal{Q}},\theta^{\mu^{\prime}}\leftarrow \theta^{\mu}$ +Initialize replay buffer +Training: +for $n = 1,n_{\text{episodes}}$ do Reset environment +Receive initial observation state $S$ +for $t = 1,n_{\text{steps}}$ do for $p = 1,n_{\text{actions}}$ do $a_{p} = \mu (s_{t}|\theta^{\mu}) + \mathcal{N}_{t}$ / $\star$ according to current policy and exploration noise $\star /$ return $[a_{p_1},\dots ,a_{p_n}]$ BestAction $(Fk_{net},a_p)$ : return $\leftarrow a_{p_i},|min(x_{target} - x_{p_i})$ Set $a_t\gets a_{p_i}$ Execute action: $a_{t}$ Observe: reward $r_t$ and new state $s_{t + 1}$ Store transition: $(s_t,a_t,r_t,s_{t + 1})$ in $R$ Sample random batch: from nbatch transitions $(s_i,a_i,r_i,s_{t + i})$ from $R$ Set: $y_{i} = r_{i} + \gamma Q^{\prime}(s_{i + 1},\mu^{\prime}(s_{i + 1}|\theta^{\mu^{\prime}})|\theta^{Q^{\prime}})$ Update critic by minimizing the loss: $L = \frac{1}{N}\sum_{i}(y_{i} - Q(s_{i},a_{i}|\theta^{Q}))^{2}$ Update actor policy using sampled policy gradient: $\nabla_{\theta^{\mu}}J\approx$ $\frac{1}{N}\sum_{i}\nabla_{a}Q(s,a|\theta^{Q})|_{s=s_{i},a=\mu(s_{i})}\nabla_{\theta^{\mu}}\mu(s|\theta^{\mu})|_{s_i}$ Update target networks, $\theta^{Q^{\prime}}\gets \tau \theta^{Q} + (1 - \tau)\theta^{Q^{\prime}}$ $\theta^{\mu^{\prime}}\gets \tau \theta^{\mu} + (1 - \tau)\theta^{\mu^{\prime}}$ + +# VI. CONCLUSIONS AND FUTURE WORK + +The presented results demonstrate the direct application of RL to heavy-duty manipulators and the feasibility of directly deploying a control policy entirely learned in simulation to physical forestry cranes. 
The main advantage of the presented approach is that no mathematical formulation either of kinematics or dynamics is required. For our approach, we do not need the geometry information to acquire the cylinder-joint mapping which is required for the automation of such manipulators. Our approach inherently adapts the actuation dynamics which in general is a complex problem involving numerous external factors. Our controller requires minimal system information which can be easily acquired by retrofitting the manipulator, thus making the automation of such heavy manipulators very efficient and economical. We + +made use of the available information in a simple and elegant approach to make the controller learning process much more efficient by providing feedback on the exploration actions and choosing the best one among the action candidates. However, our real-world experiment results suffered from tracking errors, mainly due intervening dynamic factors (gripper sway, backlash, actuation inaccuracies) and poorly tuned low-level control. In contrast to these intervening factors and given the fact that our controller is trained only on 1500 data points sampled from the complete manipulator trajectory, the tracking accuracy is remarkable. The controller performance can be greatly improved, by training on more data points and providing a finely tuned low-level controller. + +To address the problem of gripper sway, in future work we will extend our framework to integrate the sway motion during the learning process to model a compensating or aggressive control policy. We also plan to incorporate a generalized Long Short Term Memory (LSTM) based backlash model, to also take the backlash motion into account during training. Even though our feedback model facilitates the controller in an efficient exploration and learning, it still contains minor inaccuracies which might be affecting the learning process. 
A better feedback model will undoubtedly improve the controller performance. For more complex manipulation tasks we plan to use curriculum learning [20], which has been shown to accelerate and improve the learning process. + +# REFERENCES + +[1] P. La Hera and D. O. Morales, “What do we observe when we equip a forestry crane with motion sensors?” Croatian Journal of Forest Engineering: Journal for Theory and Application of Forestry Engineering, vol. 40, no. 2, pp. 259–280, 2019. +[2] P. L. Hera, U. Mettin, I. R. Manchester, and A. Shiriaev, "Identification and control of a hydraulic forestry crane," IFAC Proceedings Volumes, vol. 41, no. 2, pp. 2306-2311, 2008, 17th IFAC World Congress. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1474667016392941 +[3] P. La Hera and D. Ortiz Morales, "Model-based development of control systems for forestry cranes," Journal of Control Science and Engineering, vol. 2015, 2015. +[4] S. Fodor, C. Vázquez, and L. Freidovich, "Automation of slewing motions for forestry cranes," in 2015 15th International Conference on Control, Automation and Systems (ICCAS), 2015, pp. 796-801. +[5] V. Mnih, K. Kavukcuoglu, D. Silver, A. A. Rusu, J. Veness, M. G. Bellemare, A. Graves, M. Riedmiller, A. K. Fidjeland, G. Ostrovski, S. Petersen, C. Beattie, A. Sadik, I. Antonoglou, H. King, D. Kumaran, D. Wierstra, S. Legg, and D. Hassabis, "Human-level control through deep reinforcement learning," Nature, vol. 518, no. 7540, pp. 529-533, Feb 2015. [Online]. Available: https://doi.org/10.1038/nature14236 +[6] W. Dabney, M. Rowland, M. Bellemare, and R. Munos, "Distributional reinforcement learning with quantile regression," in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 32, no. 1, 2018. +[7] V. Mnih, A. P. Badia, M. Mirza, A. Graves, T. Lillicrap, T. Harley, D. Silver, and K. Kavukcuoglu, "Asynchronous methods for deep reinforcement learning," in International conference on machine learning. PMLR, 2016, pp. 
1928-1937. +[8] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov, “Proximal policy optimization algorithms,” arXiv preprint arXiv:1707.06347, 2017. +[9] J. Schulman, S. Levine, P. Abbeel, M. Jordan, and P. Moritz, "Trust region policy optimization," in Proceedings of the 32nd International Conference on Machine Learning, ser. Proceedings of Machine Learning Research, F. Bach and D. Blei, Eds., vol. 37. Lille, France: PMLR, 07-09 Jul 2015, pp. 1889-1897. [Online]. Available: https://proceedings.mlr.press/v37/schulman15.html + +[10] P. Egli and M. Hutter, "Towards rl-based hydraulic excavator automation," in 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2020, pp. 2692-2697. +[11] J. Andersson, K. Bodin, D. Lindmark, M. Servin, and E. Wallin, "Reinforcement learning control of a forestry crane manipulator," in 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2021, pp. 2121-2126. +[12] H. Gietler, C. Stetco, and H. Zangl, "Scalable retrofit angular position sensor system," in 2020 IEEE International Instrumentation and Measurement Technology Conference (I2MTC). IEEE, 2020, pp. 1-6. +[13] E. Rohmer, S. P. Singh, and M. Freese, "V-rep: A versatile and scalable robot simulation framework," in 2013 IEEE/RSJ International Conference on Intelligent Robots and Systems. IEEE, 2013, pp. 1321-1326. +[14] E. Coumans et al., “Bullet real-time physics simulation,” URL http://bulletphysics.org, 2013. +[15] R. Smith et al., "Open dynamics engine," 2007. +[16] CM-Labs, "Vortex studio," CM Labs, 2020. +[17] J. Jerez and A. Suero, “Newton game dynamics,” Open Source Physics Engine, 2008. +[18] T. P. Lillicrap, J. J. Hunt, A. Pritzel, N. Heess, T. Erez, Y. Tassa, D. Silver, and D. Wierstra, "Continuous control with deep reinforcement learning," arXiv preprint arXiv:1509.02971, 2015. +[19] P. Henderson, R. Islam, P. Bachman, J. Pineau, D. Precup, and D. 
Meger, “Deep reinforcement learning that matters,” CoRR, vol. abs/1709.06560, 2017. [Online]. Available: http://arxiv.org/abs/1709.06560 +[20] Y. Bengio, J. Louradour, R. Collobert, and J. Weston, “Curriculum learning,” in Proceedings of the 26th annual international conference on machine learning, 2009, pp. 41–48. \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15714/images/0de451a910d927eb07699ac09f4a7436643b76c64c8749231ddd37eb061202c5.jpg b/data/2025/2504_15xxx/2504.15714/images/0de451a910d927eb07699ac09f4a7436643b76c64c8749231ddd37eb061202c5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ffa1c232ca3b5bf2b84f9b10bd2a4de32085c10e --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/0de451a910d927eb07699ac09f4a7436643b76c64c8749231ddd37eb061202c5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:846197a4b1b29760727ffbf302b5e721bf7ea9595a42af753e19d9503c237260 +size 16811 diff --git a/data/2025/2504_15xxx/2504.15714/images/0ff172a1e9706049a3ab566b4fa2dff61b7fbc3e448f9a68de3e2c9da7881ed9.jpg b/data/2025/2504_15xxx/2504.15714/images/0ff172a1e9706049a3ab566b4fa2dff61b7fbc3e448f9a68de3e2c9da7881ed9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7822cc05c2e645db2bfe3223a5512b5d866008f1 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/0ff172a1e9706049a3ab566b4fa2dff61b7fbc3e448f9a68de3e2c9da7881ed9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc1f680a0e81c076f9aee2416075db95ffc0e7802100f25a695d78f9b9aa9576 +size 25113 diff --git a/data/2025/2504_15xxx/2504.15714/images/1766108b659a76a1a9c6a84cf6d74f5cb68b2a8323ec8343f166cc285ac067f1.jpg b/data/2025/2504_15xxx/2504.15714/images/1766108b659a76a1a9c6a84cf6d74f5cb68b2a8323ec8343f166cc285ac067f1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..efa3fa62b86accb02c9882cfe8008e1989a79393 --- /dev/null +++ 
b/data/2025/2504_15xxx/2504.15714/images/1766108b659a76a1a9c6a84cf6d74f5cb68b2a8323ec8343f166cc285ac067f1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8bd766aed34365352574f1856c462bdd91c31f2d7a2dd309e8246f978cb44525 +size 26474 diff --git a/data/2025/2504_15xxx/2504.15714/images/1918b898feb71111018760b82ae484f8dc74128bde4d2b4ff142827a7d7a3e3b.jpg b/data/2025/2504_15xxx/2504.15714/images/1918b898feb71111018760b82ae484f8dc74128bde4d2b4ff142827a7d7a3e3b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..33300a884372da1590ea21d6ab67954bc79f20ec --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/1918b898feb71111018760b82ae484f8dc74128bde4d2b4ff142827a7d7a3e3b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dd012bcd10e3d0a732791c11f0464d5632b9d522e828c3a4d10023eb79476bf +size 24055 diff --git a/data/2025/2504_15xxx/2504.15714/images/19833e38b857d9913051ebc6cc3f4945e35d8a0150770467823b8209669f8666.jpg b/data/2025/2504_15xxx/2504.15714/images/19833e38b857d9913051ebc6cc3f4945e35d8a0150770467823b8209669f8666.jpg new file mode 100644 index 0000000000000000000000000000000000000000..410148ade0f24cdb933b92f29e6c454358cd8030 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/19833e38b857d9913051ebc6cc3f4945e35d8a0150770467823b8209669f8666.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3094351d62264216b308b268c0ca37b9c24e5e5dbc98c5c76466bc3f1d0056e0 +size 17252 diff --git a/data/2025/2504_15xxx/2504.15714/images/1ef568fffce5dfb948db8e81ebb41714d7215b2de0a59011d3078cd0b7e6aca7.jpg b/data/2025/2504_15xxx/2504.15714/images/1ef568fffce5dfb948db8e81ebb41714d7215b2de0a59011d3078cd0b7e6aca7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a5729df6f1d3a1d6d9098b0d0e7fe125b4cae9c9 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/1ef568fffce5dfb948db8e81ebb41714d7215b2de0a59011d3078cd0b7e6aca7.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:683ac82c347e66dba579d8f4b64f38fb0c1973d9f8f75428cb0e45f3c1da9a08 +size 10569 diff --git a/data/2025/2504_15xxx/2504.15714/images/200646669cbdd5936b36353cd83b3d28e8d93a92f7ee0ed87039448f6357d32d.jpg b/data/2025/2504_15xxx/2504.15714/images/200646669cbdd5936b36353cd83b3d28e8d93a92f7ee0ed87039448f6357d32d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04fee71186743e0676387d6af9be105cb50fa175 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/200646669cbdd5936b36353cd83b3d28e8d93a92f7ee0ed87039448f6357d32d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2a0792878ba14613ef36cbe15398d1b4671fab7cca331eaa1fa6859a77ee56f +size 12951 diff --git a/data/2025/2504_15xxx/2504.15714/images/24de990de12ca065df1ad0249823ff8cc13ebce5db9034346c97011e859835da.jpg b/data/2025/2504_15xxx/2504.15714/images/24de990de12ca065df1ad0249823ff8cc13ebce5db9034346c97011e859835da.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0f4bf9a6f1e57a8ef1625b75763d7b18a87fa44b --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/24de990de12ca065df1ad0249823ff8cc13ebce5db9034346c97011e859835da.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dcdda37945ae4d3895a1ecf965c0cc6a1da12614dc248a86191e3f92a2372a1 +size 21903 diff --git a/data/2025/2504_15xxx/2504.15714/images/282b6dce16d75d3572a07c67522f78d93b534fc7ee04ddcb62da68ef11c01a0d.jpg b/data/2025/2504_15xxx/2504.15714/images/282b6dce16d75d3572a07c67522f78d93b534fc7ee04ddcb62da68ef11c01a0d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e40fe3a7f82ea51ca1d1a8bab29e5adeb689a79 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/282b6dce16d75d3572a07c67522f78d93b534fc7ee04ddcb62da68ef11c01a0d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad0f5eceaf4090e408a382aae718cd6e2e2fede3ec5ed03aaaf454772a552e63 +size 52350 diff --git 
a/data/2025/2504_15xxx/2504.15714/images/2f78940a880e512f37387b0be3e6118bcf2d143d524f78be3a4bc1f6b80a1417.jpg b/data/2025/2504_15xxx/2504.15714/images/2f78940a880e512f37387b0be3e6118bcf2d143d524f78be3a4bc1f6b80a1417.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46da6f30554ee15bc294f35749a08b5f240c6a8d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/2f78940a880e512f37387b0be3e6118bcf2d143d524f78be3a4bc1f6b80a1417.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77634c970fd50989e69e81b85f720bd2baae9852122f33783163f7b47c405cb2 +size 48902 diff --git a/data/2025/2504_15xxx/2504.15714/images/456b6813ef49bd30421e1cdced8dc30bcdf85592d5a2c70c29cb5a80ed7b3e07.jpg b/data/2025/2504_15xxx/2504.15714/images/456b6813ef49bd30421e1cdced8dc30bcdf85592d5a2c70c29cb5a80ed7b3e07.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2da266d10818a9923bf28027b4d25525c37f297c --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/456b6813ef49bd30421e1cdced8dc30bcdf85592d5a2c70c29cb5a80ed7b3e07.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:893d275d397b6c2863961106738a09c896b9feaa744fd78cdeef6482f36b9c0d +size 14410 diff --git a/data/2025/2504_15xxx/2504.15714/images/4fb788b966b4cbf24b9bed55df7e54d87ba97af5539771e810076f2e2fb5dd17.jpg b/data/2025/2504_15xxx/2504.15714/images/4fb788b966b4cbf24b9bed55df7e54d87ba97af5539771e810076f2e2fb5dd17.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a045eb5a6639fab69b1e2e0a84ce19f55f6b43d5 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/4fb788b966b4cbf24b9bed55df7e54d87ba97af5539771e810076f2e2fb5dd17.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f02a2e4078384850c9607e2d9e0e0b6468d95f70293a3cfa5dc04576770b499e +size 12681 diff --git a/data/2025/2504_15xxx/2504.15714/images/50a129a1473f93925858127e715013f22fb426c9ed9b317a9fba1f66608fdc8e.jpg 
b/data/2025/2504_15xxx/2504.15714/images/50a129a1473f93925858127e715013f22fb426c9ed9b317a9fba1f66608fdc8e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e5b732718688d71389caa3555a1c75a433e28690 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/50a129a1473f93925858127e715013f22fb426c9ed9b317a9fba1f66608fdc8e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b9a15112c823f1121f6326ce3cf0c11998dcd30ae2caecf3916e7046c46c197 +size 75441 diff --git a/data/2025/2504_15xxx/2504.15714/images/63c3b1d87892ad4296b482afa68b8dcc1c2a1f5e2da7c3c4954380f200625a47.jpg b/data/2025/2504_15xxx/2504.15714/images/63c3b1d87892ad4296b482afa68b8dcc1c2a1f5e2da7c3c4954380f200625a47.jpg new file mode 100644 index 0000000000000000000000000000000000000000..abef27261b4b5d62c900458ca438b4a6dfde6cc2 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/63c3b1d87892ad4296b482afa68b8dcc1c2a1f5e2da7c3c4954380f200625a47.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5f59e69f49542e2b8e9db61979665489c5de61ac9753b079f797abb1d154c7e +size 31651 diff --git a/data/2025/2504_15xxx/2504.15714/images/89769b68188785d41523fe5e36a853662b531cf8c3afc7b6b5218adb3a2c22b3.jpg b/data/2025/2504_15xxx/2504.15714/images/89769b68188785d41523fe5e36a853662b531cf8c3afc7b6b5218adb3a2c22b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aad5f3806fd243379b476a41b34eb3e401e5e80e --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/89769b68188785d41523fe5e36a853662b531cf8c3afc7b6b5218adb3a2c22b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:236533ccb5d3f3df4ea45a0e8d2f69ac3ae2b542adcdb3a25046a1f559378744 +size 25640 diff --git a/data/2025/2504_15xxx/2504.15714/images/959b617bf95a2fc023392858e28016f6decfa4ef0d154fc7e850bb39d842b26d.jpg b/data/2025/2504_15xxx/2504.15714/images/959b617bf95a2fc023392858e28016f6decfa4ef0d154fc7e850bb39d842b26d.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..c8a91d93da9c614f5f479ae4e13495f0f5cda38c --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/959b617bf95a2fc023392858e28016f6decfa4ef0d154fc7e850bb39d842b26d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2e05d03064737451fe92a666aae1f3c23e25be204c5c6bda4be3ccf7b0909df +size 17039 diff --git a/data/2025/2504_15xxx/2504.15714/images/a592265274bdaf56173f88c5d5586737e10ee36a9263ef57f9b9fcd2cf99bb42.jpg b/data/2025/2504_15xxx/2504.15714/images/a592265274bdaf56173f88c5d5586737e10ee36a9263ef57f9b9fcd2cf99bb42.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e688e247cdd389bd3f3d38847bca534cf414215b --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/a592265274bdaf56173f88c5d5586737e10ee36a9263ef57f9b9fcd2cf99bb42.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:490add7020915cf9e312f285c079b4a3f1e11215bbe48c546fe177b5b710a799 +size 20254 diff --git a/data/2025/2504_15xxx/2504.15714/images/a5ae58c57153f6df9ae9bd5a86843a5a760c1b857b6f492a818443fb6613131c.jpg b/data/2025/2504_15xxx/2504.15714/images/a5ae58c57153f6df9ae9bd5a86843a5a760c1b857b6f492a818443fb6613131c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..abc75525d197503a56db526ac0c553b392601395 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/a5ae58c57153f6df9ae9bd5a86843a5a760c1b857b6f492a818443fb6613131c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a03e4eed3419048c2f78b9d7ada8f240d2a1f89bbbb708a2f51cbf0d1cffc00 +size 40677 diff --git a/data/2025/2504_15xxx/2504.15714/images/a8a1f8bf9611d114565d29e224f0c989988432c99613cb7176c03ee1f699d59d.jpg b/data/2025/2504_15xxx/2504.15714/images/a8a1f8bf9611d114565d29e224f0c989988432c99613cb7176c03ee1f699d59d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8fccd3429f3caf38d6bb32ca638dcdf0deacef5b --- /dev/null +++ 
b/data/2025/2504_15xxx/2504.15714/images/a8a1f8bf9611d114565d29e224f0c989988432c99613cb7176c03ee1f699d59d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b7930946284dfcaa684576a7053c204eeef10c1d244ca7f8cfdc8c7fe674751 +size 21441 diff --git a/data/2025/2504_15xxx/2504.15714/images/bc475e9736badced79ff37cd0d7b6f2c30391f27b24b10ea198d63521742775c.jpg b/data/2025/2504_15xxx/2504.15714/images/bc475e9736badced79ff37cd0d7b6f2c30391f27b24b10ea198d63521742775c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..69141e51e884256517e6cee8bc11f7c22b8e5a60 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/bc475e9736badced79ff37cd0d7b6f2c30391f27b24b10ea198d63521742775c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82d99703736d69d8d4c7c5612de6dd7476b81375e42ee330e21c637154e5d6b8 +size 21584 diff --git a/data/2025/2504_15xxx/2504.15714/images/bd54f262f80aa7631a79e5f18caf190a7f560cf3957649dcad75541910accba9.jpg b/data/2025/2504_15xxx/2504.15714/images/bd54f262f80aa7631a79e5f18caf190a7f560cf3957649dcad75541910accba9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7785462c43fa85a4dde469304a0d504ca6b2a733 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/bd54f262f80aa7631a79e5f18caf190a7f560cf3957649dcad75541910accba9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f66a1a2bb935fc7f42742abefce95b39885be1c97521ba6640a0ab4af5eb4e29 +size 17118 diff --git a/data/2025/2504_15xxx/2504.15714/images/be285313dbfb81aa70b092241b850e1b5bb5c496a3fcb32cd71290df131f0e58.jpg b/data/2025/2504_15xxx/2504.15714/images/be285313dbfb81aa70b092241b850e1b5bb5c496a3fcb32cd71290df131f0e58.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b65af10ea70d1934fabba1d22da3541262360ab --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/be285313dbfb81aa70b092241b850e1b5bb5c496a3fcb32cd71290df131f0e58.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:90cb77be0b00982e3c995bc84a15cc26f4d8109403164aeaeb13b0cf53744642 +size 23520 diff --git a/data/2025/2504_15xxx/2504.15714/images/c735e05f9610377714b0e6df852a1b7937f1d8bbd2000a99a2aaf4e48ae95ded.jpg b/data/2025/2504_15xxx/2504.15714/images/c735e05f9610377714b0e6df852a1b7937f1d8bbd2000a99a2aaf4e48ae95ded.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb8c218a0562f2313111f5b73e21e7be2a52efb1 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/c735e05f9610377714b0e6df852a1b7937f1d8bbd2000a99a2aaf4e48ae95ded.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc004ba4e51a0eba4cba66769b28a8149ce1cbf9698774fd88d6776c9abb5617 +size 16257 diff --git a/data/2025/2504_15xxx/2504.15714/images/dc245afab5ffe8c91cbbb34839c3c74d38ee032740331754c65ec1f3448037f5.jpg b/data/2025/2504_15xxx/2504.15714/images/dc245afab5ffe8c91cbbb34839c3c74d38ee032740331754c65ec1f3448037f5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e2651dcf0ded6987ae003f3d123321a26ca8189e --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/dc245afab5ffe8c91cbbb34839c3c74d38ee032740331754c65ec1f3448037f5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9c4a3462fa30d0db86dfc5ae59e791d2d84b9172c60692e79b959d27ba209de +size 28508 diff --git a/data/2025/2504_15xxx/2504.15714/images/e9067ffa6fcec049c98ce6e2558a2c9d88fcc80d7336f3d6e92a6d0437b8f735.jpg b/data/2025/2504_15xxx/2504.15714/images/e9067ffa6fcec049c98ce6e2558a2c9d88fcc80d7336f3d6e92a6d0437b8f735.jpg new file mode 100644 index 0000000000000000000000000000000000000000..41c516d92bd1b4b5516c547c04a024bf8c8f4dbf --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/e9067ffa6fcec049c98ce6e2558a2c9d88fcc80d7336f3d6e92a6d0437b8f735.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a072f85d9b73afc1d9d68213691c637eb2df375c3c7681bc5af97fe28b87f080 +size 3711 diff --git 
a/data/2025/2504_15xxx/2504.15714/images/e9a0c0fe0799e18c262aeb1a0ffd97db46212cd8b4e8d774269a4d11c46f6469.jpg b/data/2025/2504_15xxx/2504.15714/images/e9a0c0fe0799e18c262aeb1a0ffd97db46212cd8b4e8d774269a4d11c46f6469.jpg new file mode 100644 index 0000000000000000000000000000000000000000..591e55d94ee481c8e4f8734f9f73cdf1728db37b --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/e9a0c0fe0799e18c262aeb1a0ffd97db46212cd8b4e8d774269a4d11c46f6469.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d435196c00ade3e3dcb5ed38bea00aaa5f48f17191bdac381a2bc75281eff786 +size 36492 diff --git a/data/2025/2504_15xxx/2504.15714/images/eae163ea51a695c45611d0e685ac65e614c7797a6b91db3920d1340686ed9b34.jpg b/data/2025/2504_15xxx/2504.15714/images/eae163ea51a695c45611d0e685ac65e614c7797a6b91db3920d1340686ed9b34.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87ede69f4b7474fc33a685646679c500625f6f3d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/eae163ea51a695c45611d0e685ac65e614c7797a6b91db3920d1340686ed9b34.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff9d44ed2be42a85b2ce36ae38e05445b508cd215b91a46e1a7d1f7c9e07b414 +size 28415 diff --git a/data/2025/2504_15xxx/2504.15714/images/eb870b6c3001c7d2105bed50b781bb7405a0fd0d2b68ffdd67a1ee8ccb5aa83a.jpg b/data/2025/2504_15xxx/2504.15714/images/eb870b6c3001c7d2105bed50b781bb7405a0fd0d2b68ffdd67a1ee8ccb5aa83a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d2e53941b1ef2c83bb7f851ffc50a323d6250597 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/images/eb870b6c3001c7d2105bed50b781bb7405a0fd0d2b68ffdd67a1ee8ccb5aa83a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76d5b2becd06564abab1ad41c0cdd3798df311e5311f1b0e0a51de5bf11c78a7 +size 24907 diff --git a/data/2025/2504_15xxx/2504.15714/layout.json b/data/2025/2504_15xxx/2504.15714/layout.json new file mode 100644 index 
0000000000000000000000000000000000000000..84471c7ca361b5b8dbadeae754db6a706f1db35e --- /dev/null +++ b/data/2025/2504_15xxx/2504.15714/layout.json @@ -0,0 +1,6430 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 75, + 69, + 537, + 109 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 69, + 537, + 109 + ], + "spans": [ + { + "bbox": [ + 75, + 69, + 537, + 109 + ], + "type": "text", + "content": "Autonomous Control of Redundant Hydraulic Manipulator Using Reinforcement Learning with Action Feedback" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 137, + 557, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 137, + 557, + 152 + ], + "spans": [ + { + "bbox": [ + 50, + 137, + 557, + 152 + ], + "type": "text", + "content": "Rohit Dhakate1, Christian Brommer1, Christoph Böhm1, Harald Gietler2, Stephan Weiss1, and Jan Steinbrener1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 185, + 299, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 185, + 299, + 435 + ], + "spans": [ + { + "bbox": [ + 52, + 185, + 299, + 435 + ], + "type": "text", + "content": "Abstract- This article presents an entirely data-driven approach for autonomous control of redundant manipulators with hydraulic actuation. The approach only requires minimal system information, which is inherited from a simulation model. The non-linear hydraulic actuation dynamics are modeled using actuator networks from the data gathered during the manual operation of the manipulator to effectively emulate the real system in a simulation environment. A neural network control policy for autonomous control, based on end-effector (EE) position tracking is then learned using Reinforcement Learning (RL) with Ornstein-Uhlenbeck process noise (OUNoise) for efficient exploration. 
The RL agent also receives feedback based on supervised learning of the forward kinematics which facilitates selecting the best suitable action from exploration. The control policy directly provides the joint variables as outputs based on provided target EE position while taking into account the system dynamics. The joint variables are then mapped to the hydraulic valve commands, which are then fed to the system without further modifications. The proposed approach is implemented on a scaled hydraulic forwarder crane with three revolute and one prismatic joint to track the desired position of the EE in 3-Dimensional (3D) space. With the emulated dynamics and extensive learning in simulation, the results demonstrate the feasibility of deploying the learned controller directly on the real system." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 134, + 445, + 216, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 445, + 216, + 456 + ], + "spans": [ + { + "bbox": [ + 134, + 445, + 216, + 456 + ], + "type": "text", + "content": "I. INTRODUCTION" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 464, + 299, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 464, + 299, + 559 + ], + "spans": [ + { + "bbox": [ + 50, + 464, + 299, + 559 + ], + "type": "text", + "content": "Hydraulic cranes are versatile heavy-duty manipulators that are omnipresent in construction, mining, agriculture, or forestry for lifting and transporting heavy objects. Automation by sensor retrofitting of these manipulators tackles not only challenging and dull, dangerous, dirty (DDD) tasks concerning the handling of raw materials but also brings economic benefits by increased productivity, and effortless system upgrades according to the desired functionality." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 559, + 299, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 559, + 299, + 632 + ], + "spans": [ + { + "bbox": [ + 50, + 559, + 299, + 632 + ], + "type": "text", + "content": "With the proposed approach, we are addressing the forest log transportation use case. The manipulator repeatedly performs a monotonous pick-and-place operation to collect and redistribute logs prepared by the harvester. Forwarder cranes mainly remain manually operated, despite continuous widespread automation in the industry. Manual operation" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 313, + 180, + 557, + 352 + ], + "blocks": [ + { + "bbox": [ + 313, + 180, + 557, + 352 + ], + "lines": [ + { + "bbox": [ + 313, + 180, + 557, + 352 + ], + "spans": [ + { + "bbox": [ + 313, + 180, + 557, + 352 + ], + "type": "image", + "image_path": "50a129a1473f93925858127e715013f22fb426c9ed9b317a9fba1f66608fdc8e.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 361, + 559, + 381 + ], + "lines": [ + { + "bbox": [ + 309, + 361, + 559, + 381 + ], + "spans": [ + { + "bbox": [ + 309, + 361, + 559, + 381 + ], + "type": "text", + "content": "Fig. 1. AutoLOG manipulator (1:5 scaled forest forwarder crane): Test-bed for our RL-based controller and manipulation tasks." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 309, + 392, + 558, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 392, + 558, + 464 + ], + "spans": [ + { + "bbox": [ + 309, + 392, + 558, + 464 + ], + "type": "text", + "content": "of such manipulators can be both mentally and physically exhausting, when producing constant, smooth and jerk free motion with joystick, since it requires complex coordination of several hydraulic cylinders [1]. 
Early automatic and semi-automatic solutions were presented by [2] using analytical methods." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 309, + 464, + 559, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 464, + 559, + 632 + ], + "spans": [ + { + "bbox": [ + 309, + 464, + 559, + 632 + ], + "type": "text", + "content": "The barriers in automation of the forest industry can be traced not only towards complex and dynamic environments but also the complexity and variants of the manipulators depending on the manufacturer. In [1] the authors argue that the automation of the entire forwarding operation is complex as numerous tasks such as log recognition, log grasping point detection/selection and pick-and-place operations are involved. However, the authors conclude that the motion patterns of the manipulator's joints are, as expected, highly repetitive and can be automated using analytical methods. However, for analytical methods, an accurate system and environment model is of utmost necessity to achieve desired results, which could take a lot of effort and time given the complexity and redundant nature of the manipulator." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 309, + 632, + 559, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 632, + 559, + 715 + ], + "spans": [ + { + "bbox": [ + 309, + 632, + 559, + 715 + ], + "type": "text", + "content": "Recent advancements in reinforcement learning not only demonstrated their applications in video-games and simulations but also enabled physical robots to learn complex skills and perform operations in real-world environments. In robot manipulation, reinforcement learning is being extensively used to develop intelligent systems that only require minimal to no system and environment information." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 642, + 299, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 642, + 299, + 734 + ], + "spans": [ + { + "bbox": [ + 50, + 642, + 299, + 734 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 50, + 642, + 299, + 734 + ], + "type": "text", + "content": "Rohit Dhakate, Christian Brommer, Christoph Böhm, Stephan Weiss and Jan Steinbrenner are with the Department of Smart Systems Technologies in the Control of Networked Systems Group, University of Klagenfurt, 9020 Klagenfurt, Austria {rohit.dhakate, christian.brommer, christoph.boehm, stephan.weiss, jan.steinbrenner}@ieee.org \n" + }, + { + "bbox": [ + 50, + 642, + 299, + 734 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 50, + 642, + 299, + 734 + ], + "type": "text", + "content": "Harald Gietler is with the Department of Smart Systems Technologies in the Sensors and Actuators Group, University of Klagenfurt, 9020 Klagenfurt, Austria {harald.gietler}@aau.at \nPre-print version, accepted June/2022, DOI follows ASAP ©IEEE." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 208, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.15714v1 [cs.RO] 22 Apr 2025" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 53, + 123, + 63 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 53, + 123, + 63 + ], + "spans": [ + { + "bbox": [ + 51, + 53, + 123, + 63 + ], + "type": "text", + "content": "A. 
Related Work" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 68, + 299, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 68, + 299, + 247 + ], + "spans": [ + { + "bbox": [ + 50, + 68, + 299, + 247 + ], + "type": "text", + "content": "Autonomous control for forest cranes has been extensively researched for the last two decades. In [3], the authors modeled the system dynamics using differential equations and applied non-linear control laws, and then performed a calibration and control tuning. While [4] also focuses on the aspect of forest crane automation, in addition to compensation for actuator nonlinearities, their main focus is on automating only the base joint (slewing motion). Until recently, all the work done towards automating forest cranes relied on model-based control. Current advancements in artificial intelligence (AI) brought substantial simplifications and advantages in tackling complex systems and problems. Within AI, RL algorithms that can be developed in a model-free domain have attracted several researchers and drove the field of automating heavy machinery with the use of AI." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 247, + 299, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 247, + 299, + 569 + ], + "spans": [ + { + "bbox": [ + 50, + 247, + 299, + 569 + ], + "type": "text", + "content": "Several RL algorithms have been proposed to solve dynamic physical models in recent years. Among which model-free algorithms gained keen interest due to their nature of generalizing a solution to a category of a problem. 
In model-free methods, Q-learning based algorithms such as Deep Q-Network(DQN) [5], Quantile Regression DQN (QR-DQN) [6], learns the action-value function " + }, + { + "bbox": [ + 50, + 247, + 299, + 569 + ], + "type": "inline_equation", + "content": "Q(s,a)" + }, + { + "bbox": [ + 50, + 247, + 299, + 569 + ], + "type": "text", + "content": " which is the expected value (cumulative discounted reward) of doing an action " + }, + { + "bbox": [ + 50, + 247, + 299, + 569 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 50, + 247, + 299, + 569 + ], + "type": "text", + "content": " in state " + }, + { + "bbox": [ + 50, + 247, + 299, + 569 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 50, + 247, + 299, + 569 + ], + "type": "text", + "content": " and then following the optimal policy, which is deterministic. Whereas Policy optimization-based algorithms such as Policy gradients, Advantage Actor-Critic (A2C)/ Asynchronous Advantage Actor-Critic (A3C) [7], Proximal Policy Optimization (PPO) [8], and Trust Region Policy Optimization (TRPO) [9], the agent learns directly the policy function that maps state to action. The policy is determined without using a value function. In recent years the application of RL for complex manipulation tasks has been carried out by several researchers. In [10] the authors implemented a TRPO algorithm for automating a hydraulic excavator. The learned control policy is validated by deploying it on the actual excavator. However, they do not control the base joint which limits the motion in 2D. The authors in [11] use PPO for learning a control policy, along with curriculum learning for grasping tasks. An energy optimization goal is also added in the reward function. However, the validation of the learned policy is conducted on the same simulation platform on which it had been trained." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 575, + 119, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 575, + 119, + 586 + ], + "spans": [ + { + "bbox": [ + 51, + 575, + 119, + 586 + ], + "type": "text", + "content": "B. Contribution" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 590, + 299, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 590, + 299, + 685 + ], + "spans": [ + { + "bbox": [ + 50, + 590, + 299, + 685 + ], + "type": "text", + "content": "To the best of our knowledge, we present the first work for automation of a real forestry crane with artificial intelligence. Our work investigates the feasibility of applying an actuator-space control policy learned in simulation on a real-world, 4 degrees of freedom, kinematically redundant forestry crane manipulator. The learned control policy maps task-space goals directly to actuator-space commands by providing the target's cartesian position." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 686, + 299, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 686, + 299, + 734 + ], + "spans": [ + { + "bbox": [ + 50, + 686, + 299, + 734 + ], + "type": "text", + "content": "We propose a generalized framework for autonomous control of redundant manipulators with highly non-linear hydraulic actuation. 
The main contributions of the proposed work are listed below," + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 320, + 53, + 558, + 315 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 320, + 53, + 558, + 123 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 53, + 558, + 123 + ], + "spans": [ + { + "bbox": [ + 320, + 53, + 558, + 123 + ], + "type": "text", + "content": "- Fully data driven approach for position tracking controller of redundant hydraulic manipulator, with minimal system information, negating the need for analytical formulation of forward and inverse kinematics, which is a highly complex task with non-standard manipulators and is subject to change with manipulator models." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 320, + 124, + 558, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 124, + 558, + 172 + ], + "spans": [ + { + "bbox": [ + 320, + 124, + 558, + 172 + ], + "type": "text", + "content": "- Emulated hydraulic actuation dynamics to precisely map from cylinder displacement to joint angles and vice-versa, eliminating the need for formulating the cylinder-joint mapping using geometry." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 320, + 172, + 558, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 172, + 558, + 219 + ], + "spans": [ + { + "bbox": [ + 320, + 172, + 558, + 219 + ], + "type": "text", + "content": "- Improvement on baseline RL controller, with feedback to predicted actions from forward kinematics network using supervised learning, which directly outputs valve commands for the required target EE position." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 320, + 220, + 558, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 220, + 558, + 315 + ], + "spans": [ + { + "bbox": [ + 320, + 220, + 558, + 315 + ], + "type": "text", + "content": "- A Sim-2-Real deployment of simulation learnt control policy onto real manipulator directly without any adaptation. To the best of our knowledge, this is the first time a Sim-2-Real transfer of RL control policy is deployed on a heavy duty manipulator for 3D position tracking in real-world. The controller performs well in tracking circular and helical trajectories both in simulation and real-world experiments." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 367, + 322, + 501, + 333 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 322, + 501, + 333 + ], + "spans": [ + { + "bbox": [ + 367, + 322, + 501, + 333 + ], + "type": "text", + "content": "II. SYSTEM DESCRIPTION" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 309, + 337, + 559, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 337, + 559, + 494 + ], + "spans": [ + { + "bbox": [ + 309, + 337, + 559, + 494 + ], + "type": "text", + "content": "The Autonomous Log Ordering through Robotic Grasping (AutoLOG) manipulator, which is a 1:5 scaled-down model of an actual forest forwarder crane, is used as a test-bed for autonomous manipulation tasks (see Fig 1). The manipulator is powered using hydraulic cylinders for its joint motions. With controllable 5 degrees of freedom, the EE can be controlled for its 3D position and yaw angle, making the manipulator redundant in nature. However, for our learning-based control task, we omit the yaw component and only focus on the 3D position of the EE. The yaw parameter of the system is application dependent, such as aligning the yaw with respect to the log orientation for pick-and-place tasks. 
A description of manipulator configuration is shown in Fig 2." + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 348, + 511, + 521, + 674 + ], + "blocks": [ + { + "bbox": [ + 348, + 511, + 521, + 674 + ], + "lines": [ + { + "bbox": [ + 348, + 511, + 521, + 674 + ], + "spans": [ + { + "bbox": [ + 348, + 511, + 521, + 674 + ], + "type": "image", + "image_path": "63c3b1d87892ad4296b482afa68b8dcc1c2a1f5e2da7c3c4954380f200625a47.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 682, + 559, + 720 + ], + "lines": [ + { + "bbox": [ + 309, + 682, + 559, + 720 + ], + "spans": [ + { + "bbox": [ + 309, + 682, + 559, + 720 + ], + "type": "text", + "content": "Fig. 2. Manipulator description: The figure displays the kinematic configuration of the manipulator. The manipulator has 4 revolute and 1 prismatic joint. All the joints in addition to the grapple are actuated using hydraulic cylinders." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 61, + 52, + 299, + 352 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 61, + 52, + 299, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 52, + 299, + 255 + ], + "spans": [ + { + "bbox": [ + 61, + 52, + 299, + 255 + ], + "type": "text", + "content": "1) Joint Angles / Displacement Measurements: For forest crane actuators, direct access to the inputs and outputs is not always available. Hence we retrofit our manipulator with exteroceptive sensors. The manipulator comprises three revolute and one prismatic joint. Just before the revolute joint 4, we have 2 orthogonal underactuated joints, which cause the gripper to sway freely in a 3D space. 
The joint states for revolute joints 2, 3, and prismatic joint are obtained by mapping the cylinder displacements to joint angles. We use Waycon SX50 draw-wire sensors to measure the cylinder displacements with a measurement error of " + }, + { + "bbox": [ + 61, + 52, + 299, + 255 + ], + "type": "inline_equation", + "content": "0.0002\\mathrm{mm}" + }, + { + "bbox": [ + 61, + 52, + 299, + 255 + ], + "type": "text", + "content": " over a displacement of " + }, + { + "bbox": [ + 61, + 52, + 299, + 255 + ], + "type": "inline_equation", + "content": "1250\\mathrm{mm}" + }, + { + "bbox": [ + 61, + 52, + 299, + 255 + ], + "type": "text", + "content": ". For revolute joint 1, we use a retrofitted inductance-based angular position sensor which provides absolute angle measurement with a maximum measurement error of 0.8 degrees [12]." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 255, + 299, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 255, + 299, + 316 + ], + "spans": [ + { + "bbox": [ + 61, + 255, + 299, + 316 + ], + "type": "text", + "content": "2) Electric Control Valves: For autonomous control of the manipulator, the hydraulic proportional valves (electro-hydraulic) are controlled using a Pulse Width Modulation (PWM) control which changes the fluid flow in cylinders according to required joint values." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 316, + 299, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 316, + 299, + 352 + ], + "spans": [ + { + "bbox": [ + 61, + 316, + 299, + 352 + ], + "type": "text", + "content": "3) Requirements for Approach: Our proposed method requires minimal system information. Table I lists the inputs and outputs of our proposed approach." 
+ } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "type": "table", + "bbox": [ + 100, + 388, + 248, + 430 + ], + "blocks": [ + { + "bbox": [ + 113, + 361, + 237, + 383 + ], + "lines": [ + { + "bbox": [ + 113, + 361, + 237, + 383 + ], + "spans": [ + { + "bbox": [ + 113, + 361, + 237, + 383 + ], + "type": "text", + "content": "TABLEI RL AGENT - INPUTS AND OUTPUTS" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 100, + 388, + 248, + 430 + ], + "lines": [ + { + "bbox": [ + 100, + 388, + 248, + 430 + ], + "spans": [ + { + "bbox": [ + 100, + 388, + 248, + 430 + ], + "type": "table", + "html": "
ParametersInputsOutput
Joint valuesqtqt+1
Current EE positionXt
Target EE positionXt+1
", + "image_path": "456b6813ef49bd30421e1cdced8dc30bcdf85592d5a2c70c29cb5a80ed7b3e07.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 95, + 453, + 254, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 95, + 453, + 254, + 464 + ], + "spans": [ + { + "bbox": [ + 95, + 453, + 254, + 464 + ], + "type": "text", + "content": "III. SIMULATION FRAMEWORK" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 471, + 299, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 471, + 299, + 650 + ], + "spans": [ + { + "bbox": [ + 50, + 471, + 299, + 650 + ], + "type": "text", + "content": "We use CoppeliaSim (formerly V-REP) [13] as our simulation framework to train the RL agent. CoppeliaSim provides a wide range of functionalities and supports multiple physics engines including Bullet [14], ODE [15], Vortex [16] and Newton [17]. The simulation scene is generated using a Computer Aided Design (CAD) model of the manipulator. The scene is dynamically enabled using Bullet 2.78 physics engine to render our simulation. The simulator provides a kinematics calculation module to compute forward and inverse kinematics of the manipulator chain, however we only use the position information of the scene objects (joints and end-effector) for our observations. Observations from the simulator can be considered as measurement from our retrofitted sensors on the real system. To control the manipulator we use a python remote API Client." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 650, + 299, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 650, + 299, + 734 + ], + "spans": [ + { + "bbox": [ + 50, + 650, + 299, + 734 + ], + "type": "text", + "content": "The simulation model is shown in Fig 3. The gripper (red object) is detached for simulations. 
Thus our simulation setup does not have the cylinder displacements as control inputs. Instead, the joint variables are provided directly to the simulator. However, the resulting joint variables from the learned controller are converted to cylinder displacements using the actuator network." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 335, + 49, + 535, + 220 + ], + "blocks": [ + { + "bbox": [ + 335, + 49, + 535, + 220 + ], + "lines": [ + { + "bbox": [ + 335, + 49, + 535, + 220 + ], + "spans": [ + { + "bbox": [ + 335, + 49, + 535, + 220 + ], + "type": "image", + "image_path": "1918b898feb71111018760b82ae484f8dc74128bde4d2b4ff142827a7d7a3e3b.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 229, + 558, + 258 + ], + "lines": [ + { + "bbox": [ + 309, + 229, + 558, + 258 + ], + "spans": [ + { + "bbox": [ + 309, + 229, + 558, + 258 + ], + "type": "text", + "content": "Fig. 3. Simulation model in CoppeliaSim. The gripper (red object) is detached for training since the effects of gripper sway is out of the scope of the proposed approach." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 398, + 284, + 470, + 294 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 398, + 284, + 470, + 294 + ], + "spans": [ + { + "bbox": [ + 398, + 284, + 470, + 294 + ], + "type": "text", + "content": "IV. METHODS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 310, + 307, + 365, + 317 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 307, + 365, + 317 + ], + "spans": [ + { + "bbox": [ + 310, + 307, + 365, + 317 + ], + "type": "text", + "content": "A. 
Overview" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 326, + 558, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 326, + 558, + 458 + ], + "spans": [ + { + "bbox": [ + 309, + 326, + 558, + 458 + ], + "type": "text", + "content": "Fig 4 shows an overview of our approach. We train an actuator and a forward network using supervised learning. The actuator network incorporates the non-linear dynamics involved in the hydraulic actuation and is trained to map cylinder displacement to joint variables and vice-versa. The forward network is a mapping from joint space to operation space of the manipulator. The RL agent (DDPG) is then trained in the simulation to reach a target 3D position from a random initial joint configuration. The trained RL agent is first evaluated for a trajectory tracking task in simulation and then is deployed on the real manipulator for final validation." + } + ] + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 345, + 480, + 524, + 566 + ], + "blocks": [ + { + "bbox": [ + 345, + 480, + 524, + 566 + ], + "lines": [ + { + "bbox": [ + 345, + 480, + 524, + 566 + ], + "spans": [ + { + "bbox": [ + 345, + 480, + 524, + 566 + ], + "type": "image", + "image_path": "24de990de12ca065df1ad0249823ff8cc13ebce5db9034346c97011e859835da.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 413, + 567, + 454, + 574 + ], + "lines": [ + { + "bbox": [ + 413, + 567, + 454, + 574 + ], + "spans": [ + { + "bbox": [ + 413, + 567, + 454, + 574 + ], + "type": "text", + "content": "Sim-2-Real" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 345, + 575, + 523, + 661 + ], + "blocks": [ + { + "bbox": [ + 345, + 575, + 523, + 661 + ], + "lines": [ + { + "bbox": [ + 345, + 575, + 523, + 661 + ], + "spans": [ + { + "bbox": [ + 345, + 575, + 523, + 661 + ], + "type": "image", + "image_path": 
"a592265274bdaf56173f88c5d5586737e10ee36a9263ef57f9b9fcd2cf99bb42.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 670, + 558, + 717 + ], + "lines": [ + { + "bbox": [ + 309, + 670, + 558, + 717 + ], + "spans": [ + { + "bbox": [ + 309, + 670, + 558, + 717 + ], + "type": "text", + "content": "Fig. 4. RL control architecture: The image shows an architecture overview of our proposed approach. The training is done completely offline on a simulation platform, it shows the interaction between forward network, RL agent and the simulation platform. Sim-2-Real transfer of the trained controller is validated by directly deploying it on the physical system." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 53, + 146, + 64 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 53, + 146, + 64 + ], + "spans": [ + { + "bbox": [ + 51, + 53, + 146, + 64 + ], + "type": "text", + "content": "B. Network Modelling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 68, + 299, + 92 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 68, + 299, + 92 + ], + "spans": [ + { + "bbox": [ + 50, + 68, + 299, + 92 + ], + "type": "text", + "content": "The two supervised learning networks facilitate our approach of learning based control." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 95, + 299, + 201 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 61, + 95, + 299, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 95, + 299, + 166 + ], + "spans": [ + { + "bbox": [ + 61, + 95, + 299, + 166 + ], + "type": "text", + "content": "1) Actuator Network: The actuator network performs a bi-directional mapping between cylinder displacements and joint variables. Our RL agent outputs joint variables for the target goal, whereas low-level manipulator control takes valve commands (cylinder displacements) as control inputs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 61, + 166, + 299, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 166, + 299, + 201 + ], + "spans": [ + { + "bbox": [ + 61, + 166, + 299, + 201 + ], + "type": "text", + "content": "2) Forward Network: The forward network takes current joint variables as inputs and returns the 3D position of EE." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 51, + 210, + 134, + 220 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 210, + 134, + 220 + ], + "spans": [ + { + "bbox": [ + 51, + 210, + 134, + 220 + ], + "type": "text", + "content": "C. Data Collection" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 225, + 300, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 225, + 300, + 393 + ], + "spans": [ + { + "bbox": [ + 62, + 225, + 300, + 393 + ], + "type": "text", + "content": "1) Actuator Network Data: For the actuator network, we collected input-output data from the real system during manual operation. 
We recorded the cylinder displacements using the retrofitted draw-wire sensors, and a motion capture system is used to measure the respective angles, since our system does not have an alternative for direct angle measurement for revolute joints 2 and 3. The cylinder control inputs were provided using a remote control designed for the manipulator. The data is collected with different cylinder velocities to capture the hydraulic actuation dynamics effectively. The collected data is believed to be incorporating all the non-linear dynamics involved in the mapping between cylinders and respective angles, see Fig 5." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 66, + 403, + 282, + 472 + ], + "blocks": [ + { + "bbox": [ + 66, + 403, + 282, + 472 + ], + "lines": [ + { + "bbox": [ + 66, + 403, + 282, + 472 + ], + "spans": [ + { + "bbox": [ + 66, + 403, + 282, + 472 + ], + "type": "image", + "image_path": "bc475e9736badced79ff37cd0d7b6f2c30391f27b24b10ea198d63521742775c.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 66, + 477, + 280, + 548 + ], + "blocks": [ + { + "bbox": [ + 66, + 477, + 280, + 548 + ], + "lines": [ + { + "bbox": [ + 66, + 477, + 280, + 548 + ], + "spans": [ + { + "bbox": [ + 66, + 477, + 280, + 548 + ], + "type": "image", + "image_path": "a8a1f8bf9611d114565d29e224f0c989988432c99613cb7176c03ee1f699d59d.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 556, + 299, + 602 + ], + "lines": [ + { + "bbox": [ + 50, + 556, + 299, + 602 + ], + "spans": [ + { + "bbox": [ + 50, + 556, + 299, + 602 + ], + "type": "text", + "content": "Fig. 5. Real data to train the actuator model is gathered from the physical manipulator. The figure shows the joint angles w.r.t. the cylinder displacement. 
The cylinder displacements are measured using a draw-wire sensor and corresponding joint angles are recorded using motion capture system." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 61, + 614, + 299, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 614, + 299, + 687 + ], + "spans": [ + { + "bbox": [ + 61, + 614, + 299, + 687 + ], + "type": "text", + "content": "2) Forward Network Data: To train the forward network, we acquired the joint variables and EE position data autonomously by setting a random joint configuration for each data point and recording the EE position using motion capture, as shown in Fig 6. The collected data also gave an insight into the manipulator work-space." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 695, + 141, + 706 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 695, + 141, + 706 + ], + "spans": [ + { + "bbox": [ + 51, + 695, + 141, + 706 + ], + "type": "text", + "content": "D. Network Training" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 61, + 710, + 299, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 710, + 299, + 734 + ], + "spans": [ + { + "bbox": [ + 61, + 710, + 299, + 734 + ], + "type": "text", + "content": "1) Actuator Network Training: We train separate networks for each joint-cylinder mapping. 
Actuator network-2" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 326, + 48, + 541, + 118 + ], + "blocks": [ + { + "bbox": [ + 326, + 48, + 541, + 118 + ], + "lines": [ + { + "bbox": [ + 326, + 48, + 541, + 118 + ], + "spans": [ + { + "bbox": [ + 326, + 48, + 541, + 118 + ], + "type": "image", + "image_path": "0ff172a1e9706049a3ab566b4fa2dff61b7fbc3e448f9a68de3e2c9da7881ed9.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 326, + 121, + 541, + 192 + ], + "blocks": [ + { + "bbox": [ + 326, + 121, + 541, + 192 + ], + "lines": [ + { + "bbox": [ + 326, + 121, + 541, + 192 + ], + "spans": [ + { + "bbox": [ + 326, + 121, + 541, + 192 + ], + "type": "image", + "image_path": "dc245afab5ffe8c91cbbb34839c3c74d38ee032740331754c65ec1f3448037f5.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 201, + 559, + 248 + ], + "lines": [ + { + "bbox": [ + 309, + 201, + 559, + 248 + ], + "spans": [ + { + "bbox": [ + 309, + 201, + 559, + 248 + ], + "type": "text", + "content": "Fig. 6. The image displays the real data recorded for forward kinematics in an autonomous fashion, to train our forward network. For each data-point random joint configuration is set and corresponding EE position is recorded using motion capture system. The sampled random configurations covers the full range of cylinder displacements." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 333, + 267, + 559, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 267, + 559, + 398 + ], + "spans": [ + { + "bbox": [ + 333, + 267, + 559, + 398 + ], + "type": "text", + "content": "maps joint2-cylinder2, while actuator network-3 maps joint3-cylinder3. 
The actuator network-2 is trained using a simple multi-layer perceptron (MLP) with 3 hidden layers (with 256-128-128 hidden units) and non-linear rectified linear unit (ReLU) activation. We used Adam optimizer with a learning rate of 1e-4. The model predicts the cylinder position for a given joint angle. Whereas the actuator network-3 uses an MLP with only 2 hidden layers(with 128-128 hidden units). Fig 7 and 8 shows the validation of trained actuator networks." + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 326, + 418, + 541, + 488 + ], + "blocks": [ + { + "bbox": [ + 326, + 418, + 541, + 488 + ], + "lines": [ + { + "bbox": [ + 326, + 418, + 541, + 488 + ], + "spans": [ + { + "bbox": [ + 326, + 418, + 541, + 488 + ], + "type": "image", + "image_path": "89769b68188785d41523fe5e36a853662b531cf8c3afc7b6b5218adb3a2c22b3.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 329, + 495, + 541, + 563 + ], + "blocks": [ + { + "bbox": [ + 329, + 495, + 541, + 563 + ], + "lines": [ + { + "bbox": [ + 329, + 495, + 541, + 563 + ], + "spans": [ + { + "bbox": [ + 329, + 495, + 541, + 563 + ], + "type": "image", + "image_path": "959b617bf95a2fc023392858e28016f6decfa4ef0d154fc7e850bb39d842b26d.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 572, + 559, + 601 + ], + "lines": [ + { + "bbox": [ + 309, + 572, + 559, + 601 + ], + "spans": [ + { + "bbox": [ + 309, + 572, + 559, + 601 + ], + "type": "text", + "content": "Fig. 7. The figure shows validation results of trained actuator network for joint 2. Excluding error spikes at few instances the network precisely learned the cylinder-joint mapping." 
+ } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "bbox": [ + 320, + 625, + 559, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 625, + 559, + 723 + ], + "spans": [ + { + "bbox": [ + 320, + 625, + 559, + 723 + ], + "type": "text", + "content": "2) Forward Network Training: Our forward network is a multi-input-multi-output (MIMO) mapping from joint variables to EE position. The network is trained using an MLP with only 2 hidden layers (with 256-128 hidden units). Despite training the network on only 500 data points, Fig 9 shows that the generalization is very accurate with a maximum prediction error of only (0.0159, 0.0205, 0.0136)m in x, y, and z, respectively." + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 67, + 47, + 284, + 118 + ], + "blocks": [ + { + "bbox": [ + 67, + 47, + 284, + 118 + ], + "lines": [ + { + "bbox": [ + 67, + 47, + 284, + 118 + ], + "spans": [ + { + "bbox": [ + 67, + 47, + 284, + 118 + ], + "type": "image", + "image_path": "be285313dbfb81aa70b092241b850e1b5bb5c496a3fcb32cd71290df131f0e58.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 72, + 121, + 284, + 193 + ], + "blocks": [ + { + "bbox": [ + 72, + 121, + 284, + 193 + ], + "lines": [ + { + "bbox": [ + 72, + 121, + 284, + 193 + ], + "spans": [ + { + "bbox": [ + 72, + 121, + 284, + 193 + ], + "type": "image", + "image_path": "c735e05f9610377714b0e6df852a1b7937f1d8bbd2000a99a2aaf4e48ae95ded.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 201, + 299, + 221 + ], + "lines": [ + { + "bbox": [ + 50, + 201, + 299, + 221 + ], + "spans": [ + { + "bbox": [ + 50, + 201, + 299, + 221 + ], + "type": "text", + "content": "Fig. 8. 
Validation results of the trained actuator network for joint 3 are shown in this figure." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 66, + 225, + 284, + 270 + ], + "blocks": [ + { + "bbox": [ + 66, + 225, + 284, + 270 + ], + "lines": [ + { + "bbox": [ + 66, + 225, + 284, + 270 + ], + "spans": [ + { + "bbox": [ + 66, + 225, + 284, + 270 + ], + "type": "image", + "image_path": "19833e38b857d9913051ebc6cc3f4945e35d8a0150770467823b8209669f8666.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 67, + 272, + 283, + 316 + ], + "blocks": [ + { + "bbox": [ + 67, + 272, + 283, + 316 + ], + "lines": [ + { + "bbox": [ + 67, + 272, + 283, + 316 + ], + "spans": [ + { + "bbox": [ + 67, + 272, + 283, + 316 + ], + "type": "image", + "image_path": "bd54f262f80aa7631a79e5f18caf190a7f560cf3957649dcad75541910accba9.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 78, + 319, + 283, + 370 + ], + "blocks": [ + { + "bbox": [ + 78, + 319, + 283, + 370 + ], + "lines": [ + { + "bbox": [ + 78, + 319, + 283, + 370 + ], + "spans": [ + { + "bbox": [ + 78, + 319, + 283, + 370 + ], + "type": "image", + "image_path": "0de451a910d927eb07699ac09f4a7436643b76c64c8749231ddd37eb061202c5.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 379, + 299, + 407 + ], + "lines": [ + { + "bbox": [ + 50, + 379, + 299, + 407 + ], + "spans": [ + { + "bbox": [ + 50, + 379, + 299, + 407 + ], + "type": "text", + "content": "Fig. 9. Figure shows evaluation of the forward network. With a multi-input structure involving 4 joint variables, the network trained very efficiently to return 3D position of the EE." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 415, + 211, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 415, + 211, + 426 + ], + "spans": [ + { + "bbox": [ + 51, + 415, + 211, + 426 + ], + "type": "text", + "content": "E. Reinforcement Learning Controller" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 430, + 299, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 430, + 299, + 502 + ], + "spans": [ + { + "bbox": [ + 50, + 430, + 299, + 502 + ], + "type": "text", + "content": "Our proposed learning-based controller uses RL to synthesize a model-free task-space position tracking controller. The RL controller learns the inverse kinematics of the manipulator, which cannot be formulated analytically without any optimization objectives due to the redundant nature of the manipulator." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 61, + 502, + 222, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 502, + 222, + 514 + ], + "spans": [ + { + "bbox": [ + 61, + 502, + 222, + 514 + ], + "type": "text", + "content": "Reinforcement Learning Preliminaries:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 514, + 299, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 514, + 299, + 574 + ], + "spans": [ + { + "bbox": [ + 50, + 514, + 299, + 574 + ], + "type": "text", + "content": "We formalized our RL problem as a Markov decision process (MDP), which is a discrete-time stochastic control process. We use MDP, which provides a mathematical framework for predicting outcomes where the environment is fully observable. 
The MDP is characterized by," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 61, + 576, + 298, + 647 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 61, + 576, + 269, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 576, + 269, + 587 + ], + "spans": [ + { + "bbox": [ + 61, + 576, + 269, + 587 + ], + "type": "text", + "content": "- state " + }, + { + "bbox": [ + 61, + 576, + 269, + 587 + ], + "type": "inline_equation", + "content": "(s)" + }, + { + "bbox": [ + 61, + 576, + 269, + 587 + ], + "type": "text", + "content": ": state of the agent in the environment" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 61, + 588, + 290, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 588, + 290, + 600 + ], + "spans": [ + { + "bbox": [ + 61, + 588, + 290, + 600 + ], + "type": "text", + "content": "- action " + }, + { + "bbox": [ + 61, + 588, + 290, + 600 + ], + "type": "inline_equation", + "content": "(a)" + }, + { + "bbox": [ + 61, + 588, + 290, + 600 + ], + "type": "text", + "content": ": predicted/ conducted action by the agent" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 61, + 601, + 298, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 601, + 298, + 623 + ], + "spans": [ + { + "bbox": [ + 61, + 601, + 298, + 623 + ], + "type": "text", + "content": "- reward " + }, + { + "bbox": [ + 61, + 601, + 298, + 623 + ], + "type": "inline_equation", + "content": "(r)" + }, + { + "bbox": [ + 61, + 601, + 298, + 623 + ], + "type": "text", + "content": ": a scalar valued reward based on performed action and achieved state" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 61, + 624, + 298, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 624, + 298, + 647 + ], + "spans": [ + { + "bbox": [ + 61, + 624, + 298, + 647 + ], + "type": "text", + "content": "- policy " + }, + { + "bbox": [ + 61, + 624, + 298, + 
647 + ], + "type": "inline_equation", + "content": "(\\pi(s|a))" + }, + { + "bbox": [ + 61, + 624, + 298, + 647 + ], + "type": "text", + "content": ": decision making function of state-action pair" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 51, + 650, + 272, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 650, + 272, + 662 + ], + "spans": [ + { + "bbox": [ + 51, + 650, + 272, + 662 + ], + "type": "text", + "content": "A simple actor-critic architecture is shown in Fig 10." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "spans": [ + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "text", + "content": "At a given discrete time step " + }, + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "text", + "content": ", the state of the system is given by, " + }, + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "inline_equation", + "content": "s_t \\in S" + }, + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "text", + "content": ". The agent makes an observation of the environment " + }, + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "inline_equation", + "content": "o_t \\in O" + }, + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "text", + "content": ". 
Performing an action " + }, + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "inline_equation", + "content": "a_t \\in A" + }, + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "text", + "content": " according to the policy distribution " + }, + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "inline_equation", + "content": "\\pi(a|s)" + }, + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "text", + "content": ", the agent receives an immediate scalar reward " + }, + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "inline_equation", + "content": "r_t(s_t, a_t)" + }, + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "text", + "content": " according to the specified reward function " + }, + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "inline_equation", + "content": "R(s, a)" + }, + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "text", + "content": " providing an updated state " + }, + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "inline_equation", + "content": "s_{t+1}' \\in S" + }, + { + "bbox": [ + 50, + 662, + 299, + 734 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 369, + 47, + 502, + 131 + ], + "blocks": [ + { + "bbox": [ + 369, + 47, + 502, + 131 + ], + "lines": [ + { + "bbox": [ + 369, + 47, + 502, + 131 + ], + "spans": [ + { + "bbox": [ + 369, + 47, + 502, + 131 + ], + "type": "image", + "image_path": "200646669cbdd5936b36353cd83b3d28e8d93a92f7ee0ed87039448f6357d32d.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 138, + 559, + 176 + ], + "lines": [ + { + "bbox": [ + 309, + 138, + 559, + 176 + ], + "spans": [ + { + "bbox": [ + 309, + 138, + 559, + 176 + ], + "type": "text", + "content": "Fig. 10. A simple architecture of actor-critic method of RL approach is described. 
It shows the main operation of any RL based algorithm, with state, action, reward, agent and environment being the main components of an RL algorithm." + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "bbox": [ + 309, + 183, + 558, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 183, + 558, + 266 + ], + "spans": [ + { + "bbox": [ + 309, + 183, + 558, + 266 + ], + "type": "text", + "content": "The goal of RL algorithms is to find the optimal policy " + }, + { + "bbox": [ + 309, + 183, + 558, + 266 + ], + "type": "inline_equation", + "content": "\\pi^{*}(a|s)" + }, + { + "bbox": [ + 309, + 183, + 558, + 266 + ], + "type": "text", + "content": ", such that the agent takes the optimal action at any given state in order to maximize the expected return. Here, the deep RL approach involves parameterizing the policy " + }, + { + "bbox": [ + 309, + 183, + 558, + 266 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 309, + 183, + 558, + 266 + ], + "type": "text", + "content": " as a neural network " + }, + { + "bbox": [ + 309, + 183, + 558, + 266 + ], + "type": "inline_equation", + "content": "\\pi(\\theta)" + }, + { + "bbox": [ + 309, + 183, + 558, + 266 + ], + "type": "text", + "content": " with parameters " + }, + { + "bbox": [ + 309, + 183, + 558, + 266 + ], + "type": "inline_equation", + "content": "\\theta \\in \\Theta" + }, + { + "bbox": [ + 309, + 183, + 558, + 266 + ], + "type": "text", + "content": ". The resulting policy approximator outputs a vector of actuator-space control signals at each time step." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 309, + 267, + 559, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 267, + 559, + 483 + ], + "spans": [ + { + "bbox": [ + 309, + 267, + 559, + 483 + ], + "type": "text", + "content": "We use DDPG [18] because it combines both Q-learning and policy optimization approaches. DDPG has an actor-critic architecture, where the critic network determines the Q value, and the actor network determines the actions to be taken. The actor network in DDPG simply uses the negative average Q value generated by the critic model as a loss and learns to generate actions to maximize the Q value in each state. An experience replay buffer stores all the experiences and draws a batch to train the networks. To the DDPG baseline, we added feedback to the predicted actions using our forward network for efficient exploration. Using the current policy, we predict a specified number of actions, which is then fed to the forward network to find the best actions based on the norm distance between EE position from predicted actions and the target position. The selected action is then used to perform a " + }, + { + "bbox": [ + 309, + 267, + 559, + 483 + ], + "type": "inline_equation", + "content": "(s_t, a_t, r_t, s_{t+1}')" + }, + { + "bbox": [ + 309, + 267, + 559, + 483 + ], + "type": "text", + "content": " step to get the next state " + }, + { + "bbox": [ + 309, + 267, + 559, + 483 + ], + "type": "inline_equation", + "content": "s_{t+1}'" + }, + { + "bbox": [ + 309, + 267, + 559, + 483 + ], + "type": "text", + "content": ". The contents of our system state and actions are described in Table II." 
+ } + ] + } + ], + "index": 21 + }, + { + "type": "table", + "bbox": [ + 326, + 520, + 541, + 574 + ], + "blocks": [ + { + "bbox": [ + 374, + 490, + 494, + 511 + ], + "lines": [ + { + "bbox": [ + 374, + 490, + 494, + 511 + ], + "spans": [ + { + "bbox": [ + 374, + 490, + 494, + 511 + ], + "type": "text", + "content": "TABLE II DDPG ALGORITHM COMPONENTS" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 326, + 520, + 541, + 574 + ], + "lines": [ + { + "bbox": [ + 326, + 520, + 541, + 574 + ], + "spans": [ + { + "bbox": [ + 326, + 520, + 541, + 574 + ], + "type": "table", + "html": "
ParametersContentsDimension
StateObservation: Joint variables4x1
Achieved goal: Current EE3x1
Desired goal: Target EE3x1
ActionsJoint Variables: [J1, J2, J3, J4]4x1
", + "image_path": "eb870b6c3001c7d2105bed50b781bb7405a0fd0d2b68ffdd67a1ee8ccb5aa83a.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "table_body" + } + ], + "index": 23 + }, + { + "bbox": [ + 309, + 587, + 559, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 587, + 559, + 682 + ], + "spans": [ + { + "bbox": [ + 309, + 587, + 559, + 682 + ], + "type": "text", + "content": "We give a constant reward " + }, + { + "bbox": [ + 309, + 587, + 559, + 682 + ], + "type": "inline_equation", + "content": "r_t^{step}" + }, + { + "bbox": [ + 309, + 587, + 559, + 682 + ], + "type": "text", + "content": " for each time-step which improves the learning performance. A distance reward " + }, + { + "bbox": [ + 309, + 587, + 559, + 682 + ], + "type": "inline_equation", + "content": "r_t^{dist}" + }, + { + "bbox": [ + 309, + 587, + 559, + 682 + ], + "type": "text", + "content": " which helps to learn the reaching task is given based on the norm distance between current and target EE position. We also add a joint limit avoidance reward " + }, + { + "bbox": [ + 309, + 587, + 559, + 682 + ], + "type": "inline_equation", + "content": "r_t^{jlim}" + }, + { + "bbox": [ + 309, + 587, + 559, + 682 + ], + "type": "text", + "content": " which discourages the agent from learning infeasible joint configurations. A complete episode reward is the sum of all the aforementioned reward functions." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 320, + 695, + 496, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 695, + 496, + 706 + ], + "spans": [ + { + "bbox": [ + 320, + 695, + 496, + 706 + ], + "type": "text", + "content": "Our reward function is defined as follows," + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 381, + 708, + 558, + 723 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 381, + 708, + 558, + 723 + ], + "spans": [ + { + "bbox": [ + 381, + 708, + 558, + 723 + ], + "type": "interline_equation", + "content": "r _ {t} = r _ {\\mathrm {t}} ^ {\\text {s t e p}} + r _ {\\mathrm {t}} ^ {\\text {d i s t}} + r _ {\\mathrm {t}} ^ {\\text {j l i m}} \\tag {1}", + "image_path": "e9067ffa6fcec049c98ce6e2558a2c9d88fcc80d7336f3d6e92a6d0437b8f735.jpg" + } + ] + } + ], + "index": 26 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 61, + 53, + 92, + 63 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 53, + 92, + 63 + ], + "spans": [ + { + "bbox": [ + 61, + 53, + 92, + 63 + ], + "type": "text", + "content": "where," + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 61, + 72, + 258, + 135 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 72, + 258, + 135 + ], + "spans": [ + { + "bbox": [ + 61, + 72, + 258, + 135 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} r _ {t} ^ {s t e p} = 0. 0 0 1 \\\\ r _ {t} ^ {d i s t} = - \\left(\\left\\| x _ {t + 1} - x _ {t} \\right\\| _ {2}\\right) + 0. 0 0 2 \\\\ r _ {t} ^ {j l i m} = \\left\\{ \\begin{array}{l l} - 0. 0 0 0 5, & \\text {i f , j > j _ {m a x} o r j < j _ {m i n}} \\\\ 0, & \\text {i f , j _ {m i n} \\geq j \\leq j _ {m a x}} \\end{array} \\right. 
\\\\ \\end{array}", + "image_path": "4fb788b966b4cbf24b9bed55df7e54d87ba97af5539771e810076f2e2fb5dd17.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 138, + 299, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 138, + 299, + 245 + ], + "spans": [ + { + "bbox": [ + 50, + 138, + 299, + 245 + ], + "type": "text", + "content": "We start the simulation with random initial joint configuration, and our RL agent acquires observations from the simulation environment which forms our system state. The actor network then generates random actions based on the current state and exploration noise. These actions (joint variables as control inputs) are then filtered using the forward network to select the best action, which is then carried out in a simulation step. The simulation is carried out at " + }, + { + "bbox": [ + 50, + 138, + 299, + 245 + ], + "type": "inline_equation", + "content": "100\\mathrm{hz}" + }, + { + "bbox": [ + 50, + 138, + 299, + 245 + ], + "type": "text", + "content": ", the same rate our real system is operated." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 245, + 299, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 245, + 299, + 342 + ], + "spans": [ + { + "bbox": [ + 50, + 245, + 299, + 342 + ], + "type": "text", + "content": "The DDPG algorithm is claimed to be sensitive to hyperparameters, which we observed during tuning of the hyperparameters. In [19], it is shown that DDPG with tuned hyperparameters outperforms several other policy optimization algorithms in stable environments. We modified the hyperparameters from the stable baseline parameters to suit our training environment. Table III shows the hyperparameters used for our system." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 91, + 380, + 258, + 479 + ], + "blocks": [ + { + "bbox": [ + 117, + 350, + 234, + 372 + ], + "lines": [ + { + "bbox": [ + 117, + 350, + 234, + 372 + ], + "spans": [ + { + "bbox": [ + 117, + 350, + 234, + 372 + ], + "type": "text", + "content": "TABLE III ALGORITHM HYPERPARAMETERS" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 91, + 380, + 258, + 479 + ], + "lines": [ + { + "bbox": [ + 91, + 380, + 258, + 479 + ], + "spans": [ + { + "bbox": [ + 91, + 380, + 258, + 479 + ], + "type": "table", + "html": "
ParametersVariableValues
Number of episodesn Episodes1500
Number of stepsn Steps1000
Buffer sizenbuffer1e + 06
Batch sizenbatch1024
Discount factorγ0.99
Soft target updateτ1e - 03
Actor learning ratelrac1e - 03
Critic learning ratelrcr1e - 03
OU Noiseσ0.1
", + "image_path": "e9a0c0fe0799e18c262aeb1a0ffd97db46212cd8b4e8d774269a4d11c46f6469.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 60, + 491, + 279, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 491, + 279, + 504 + ], + "spans": [ + { + "bbox": [ + 60, + 491, + 279, + 504 + ], + "type": "text", + "content": "Our complete approach is described in Algorithm 1." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 143, + 510, + 206, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 510, + 206, + 521 + ], + "spans": [ + { + "bbox": [ + 143, + 510, + 206, + 521 + ], + "type": "text", + "content": "V. RESULTS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 527, + 144, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 527, + 144, + 538 + ], + "spans": [ + { + "bbox": [ + 50, + 527, + 144, + 538 + ], + "type": "text", + "content": "A. Simulation Results" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 542, + 299, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 542, + 299, + 625 + ], + "spans": [ + { + "bbox": [ + 50, + 542, + 299, + 625 + ], + "type": "text", + "content": "We trained two different policies, Policy 1 (with action feedback) and Policy 2 (without action feedback) for 1500 episodes with randomly sampled targets from the manipulator work-space. All the hyper-parameters and simulation parameters are kept identical, with the feedback to the explored actions being the only distinction between the two policies." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 626, + 299, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 626, + 299, + 673 + ], + "spans": [ + { + "bbox": [ + 50, + 626, + 299, + 673 + ], + "type": "text", + "content": "Fig 11 shows the cumulative reward for both the policies during the training episodes. Policy 1 constantly acquires better rewards than Policy 2 for each episode, validating our approach of efficient exploration using action feedback." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 674, + 300, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 674, + 300, + 734 + ], + "spans": [ + { + "bbox": [ + 50, + 674, + 300, + 734 + ], + "type": "text", + "content": "We validated both policies for tracking a helical trajectory. From the trajectory tracking results shown in Fig 12, it can be seen that the tracking accuracy for policy 1 is better than policy 2. The absolute tracking error is shown in 13. The Root Mean Squared Error (RMSE) for Policy 1 is" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 326, + 47, + 542, + 194 + ], + "blocks": [ + { + "bbox": [ + 326, + 47, + 542, + 194 + ], + "lines": [ + { + "bbox": [ + 326, + 47, + 542, + 194 + ], + "spans": [ + { + "bbox": [ + 326, + 47, + 542, + 194 + ], + "type": "image", + "image_path": "eae163ea51a695c45611d0e685ac65e614c7797a6b91db3920d1340686ed9b34.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 201, + 559, + 239 + ], + "lines": [ + { + "bbox": [ + 309, + 201, + 559, + 239 + ], + "spans": [ + { + "bbox": [ + 309, + 201, + 559, + 239 + ], + "type": "text", + "content": "Fig. 11. Episode training rewards for Policy 1 (with feedback) and Policy 2 (without feedback) are shown. It is evident from the figure that Policy 1 is exploring efficiently because of the provided feedback. 
The feedback assists in selecting a meaningful action exploration." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 246, + 559, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 246, + 559, + 318 + ], + "spans": [ + { + "bbox": [ + 309, + 246, + 559, + 318 + ], + "type": "text", + "content": "[0.017, 0.008, 0.01], whereas for Policy 2 is [0.031, 0.017, 0.02]. Though we trained our RL agent within a defined actual manipulator work-space, we observed that the learned policy generalized the target reaching task and could perform trajectory tracking even outside the work-space on which it is trained." + } + ] + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 349, + 326, + 523, + 457 + ], + "blocks": [ + { + "bbox": [ + 349, + 326, + 523, + 457 + ], + "lines": [ + { + "bbox": [ + 349, + 326, + 523, + 457 + ], + "spans": [ + { + "bbox": [ + 349, + 326, + 523, + 457 + ], + "type": "image", + "image_path": "1766108b659a76a1a9c6a84cf6d74f5cb68b2a8323ec8343f166cc285ac067f1.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 465, + 559, + 495 + ], + "lines": [ + { + "bbox": [ + 309, + 465, + 559, + 495 + ], + "spans": [ + { + "bbox": [ + 309, + 465, + 559, + 495 + ], + "type": "text", + "content": "Fig. 12. In this figure we are comparing the 2 trained policies on a trajectory tracking task in simulation. Policy 1 performs better in tracking the helical trajectory in contrast to Policy 2." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 325, + 513, + 543, + 659 + ], + "blocks": [ + { + "bbox": [ + 325, + 513, + 543, + 659 + ], + "lines": [ + { + "bbox": [ + 325, + 513, + 543, + 659 + ], + "spans": [ + { + "bbox": [ + 325, + 513, + 543, + 659 + ], + "type": "image", + "image_path": "282b6dce16d75d3572a07c67522f78d93b534fc7ee04ddcb62da68ef11c01a0d.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 669, + 559, + 689 + ], + "lines": [ + { + "bbox": [ + 309, + 669, + 559, + 689 + ], + "spans": [ + { + "bbox": [ + 309, + 669, + 559, + 689 + ], + "type": "text", + "content": "Fig. 13. The image shows the trajectory tracking error for Policy 1 and Policy 2." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "bbox": [ + 310, + 694, + 427, + 707 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 694, + 427, + 707 + ], + "spans": [ + { + "bbox": [ + 310, + 694, + 427, + 707 + ], + "type": "text", + "content": "B. 
Real World Experiments" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 309, + 710, + 559, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 710, + 559, + 734 + ], + "spans": [ + { + "bbox": [ + 309, + 710, + 559, + 734 + ], + "type": "text", + "content": "We deployed Policy 1 directly on the real manipulator without any modifications to the outputs from the learned" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 113, + 77, + 235, + 110 + ], + "blocks": [ + { + "bbox": [ + 117, + 47, + 233, + 68 + ], + "lines": [ + { + "bbox": [ + 117, + 47, + 233, + 68 + ], + "spans": [ + { + "bbox": [ + 117, + 47, + 233, + 68 + ], + "type": "text", + "content": "TABLE IV TRAJECTORY TRACKING ERRORS" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 113, + 77, + 235, + 110 + ], + "lines": [ + { + "bbox": [ + 113, + 77, + 235, + 110 + ], + "spans": [ + { + "bbox": [ + 113, + 77, + 235, + 110 + ], + "type": "table", + "html": "
ExperimentMax. Error (mm)
Simulation27.2, 14.5, 26.3
Real-World75.2, 80.1, 73.1
", + "image_path": "1ef568fffce5dfb948db8e81ebb41714d7215b2de0a59011d3078cd0b7e6aca7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 133, + 299, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 133, + 299, + 169 + ], + "spans": [ + { + "bbox": [ + 50, + 133, + 299, + 169 + ], + "type": "text", + "content": "policy. We validated our learning-based controller approach in real-world experiments by tracking circular and helical trajectories." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 170, + 299, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 170, + 299, + 300 + ], + "spans": [ + { + "bbox": [ + 50, + 170, + 299, + 300 + ], + "type": "text", + "content": "As we trained our control policy by detaching the gripper in simulation, we did not account for the dynamic sway of the gripper when the manipulator is in motion, which is currently out of the scope of our proposed approach. The real-world trajectory tracking experiments shows that the manipulator is successfully tracking the target trajectory, see Fig 14, however, the unmodeled and unaccounted sway induced some tracking errors during the motion see 15. Table IV shows the maximum tracking error for the helical trajectory in simulation and real-world using our learned controller." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 67, + 309, + 284, + 483 + ], + "blocks": [ + { + "bbox": [ + 67, + 309, + 284, + 483 + ], + "lines": [ + { + "bbox": [ + 67, + 309, + 284, + 483 + ], + "spans": [ + { + "bbox": [ + 67, + 309, + 284, + 483 + ], + "type": "image", + "image_path": "2f78940a880e512f37387b0be3e6118bcf2d143d524f78be3a4bc1f6b80a1417.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 491, + 299, + 529 + ], + "lines": [ + { + "bbox": [ + 50, + 491, + 299, + 529 + ], + "spans": [ + { + "bbox": [ + 50, + 491, + 299, + 529 + ], + "type": "text", + "content": "Fig. 14. The figure displays the real-world experiment results of trajectory tracking from deployed Policy 1 onto the manipulator. The tracking is performed well given the harsh dynamic conditions of our system. The results validate the Sim-2-Real transfer of our learning-based control approach." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 67, + 534, + 284, + 679 + ], + "blocks": [ + { + "bbox": [ + 67, + 534, + 284, + 679 + ], + "lines": [ + { + "bbox": [ + 67, + 534, + 284, + 679 + ], + "spans": [ + { + "bbox": [ + 67, + 534, + 284, + 679 + ], + "type": "image", + "image_path": "a5ae58c57153f6df9ae9bd5a86843a5a760c1b857b6f492a818443fb6613131c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 689, + 299, + 735 + ], + "lines": [ + { + "bbox": [ + 50, + 689, + 299, + 735 + ], + "spans": [ + { + "bbox": [ + 50, + 689, + 299, + 735 + ], + "type": "text", + "content": "Fig. 15. The image visualizes the error in tracking of helical trajectory for real-world experiments. As foreseen for the same trajectory, the tracking error of the real experiment is bigger compared to simulation tracking error. However, the results are closely comparable. 
The periodic error is caused by the motion in the y-axis which causes the most sway motion of the gripper." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "code", + "bbox": [ + 316, + 66, + 553, + 522 + ], + "blocks": [ + { + "bbox": [ + 315, + 53, + 499, + 64 + ], + "lines": [ + { + "bbox": [ + 315, + 53, + 499, + 64 + ], + "spans": [ + { + "bbox": [ + 315, + 53, + 499, + 64 + ], + "type": "text", + "content": "Algorithm 1: DDPG with Action Feedback" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "lines": [ + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "spans": [ + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": "Initialization: \nRandomly initialize both actor and critic networks, \n" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "\\mu (s|\\theta^{\\mu})\\gets \\theta^{\\mu}" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "Q(s,a|\\theta^{Q})\\gets \\theta^{Q}" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " \nInitialize target networks " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "Q^{\\prime}" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "\\mu^{\\prime}" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " - \n" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "\\theta^{\\mathcal{Q}^{\\prime}}\\leftarrow \\theta^{\\mathcal{Q}},\\theta^{\\mu^{\\prime}}\\leftarrow \\theta^{\\mu}" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " \nInitialize replay buffer \nTraining: 
\nfor " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "n = 1,n_{\\text{episodes}}" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " do Reset environment \nReceive initial observation state " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " \nfor " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "t = 1,n_{\\text{steps}}" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " do for " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "p = 1,n_{\\text{actions}}" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " do " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "a_{p} = \\mu (s_{t}|\\theta^{\\mu}) + \\mathcal{N}_{t}" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " / " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "\\star" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " according to current policy and exploration noise " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "\\star /" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " return " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "[a_{p_1},\\dots ,a_{p_n}]" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " BestAction " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "(Fk_{net},a_p)" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " : return " + 
}, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "\\leftarrow a_{p_i},|min(x_{target} - x_{p_i})" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " Set " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "a_t\\gets a_{p_i}" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " Execute action: " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "a_{t}" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " Observe: reward " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "r_t" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " and new state " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "s_{t + 1}" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " Store transition: " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "(s_t,a_t,r_t,s_{t + 1})" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " Sample random batch: from nbatch transitions " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "(s_i,a_i,r_i,s_{t + i})" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " from " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " Set: " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + 
"content": "y_{i} = r_{i} + \\gamma Q^{\\prime}(s_{i + 1},\\mu^{\\prime}(s_{i + 1}|\\theta^{\\mu^{\\prime}})|\\theta^{Q^{\\prime}})" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " Update critic by minimizing the loss: " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "L = \\frac{1}{N}\\sum_{i}(y_{i} - Q(s_{i},a_{i}|\\theta^{Q}))^{2}" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " Update actor policy using sampled policy gradient: " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "\\nabla_{\\theta^{\\mu}}J\\approx" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "\\frac{1}{N}\\sum_{i}\\nabla_{a}Q(s,a|\\theta^{Q})|_{s=s_{i},a=\\mu(s_{i})}\\nabla_{\\theta^{\\mu}}\\mu(s|\\theta^{\\mu})|_{s_i}" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "text", + "content": " Update target networks, " + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "\\theta^{Q^{\\prime}}\\gets \\tau \\theta^{Q} + (1 - \\tau)\\theta^{Q^{\\prime}}" + }, + { + "bbox": [ + 316, + 66, + 553, + 522 + ], + "type": "inline_equation", + "content": "\\theta^{\\mu^{\\prime}}\\gets \\tau \\theta^{\\mu} + (1 - \\tau)\\theta^{\\mu^{\\prime}}" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "code_body" + } + ], + "index": 9, + "sub_type": "algorithm" + }, + { + "bbox": [ + 333, + 550, + 535, + 561 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 550, + 535, + 561 + ], + "spans": [ + { + "bbox": [ + 333, + 550, + 535, + 561 + ], + "type": "text", + "content": "VI. 
CONCLUSIONS AND FUTURE WORK" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 309, + 566, + 559, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 566, + 559, + 734 + ], + "spans": [ + { + "bbox": [ + 309, + 566, + 559, + 734 + ], + "type": "text", + "content": "The presented results demonstrate the direct application of RL to heavy-duty manipulators and the feasibility of directly deploying a control policy entirely learned in simulation to physical forestry cranes. The main advantage of the presented approach is that no mathematical formulation either of kinematics or dynamics is required. For our approach, we do not need the geometry information to acquire the cylinder-joint mapping which is required for the automation of such manipulators. Our approach inherently adapts the actuation dynamics which in general is a complex problem involving numerous external factors. Our controller requires minimal system information which can be easily acquired by retrofitting the manipulator, thus making the automation of such heavy manipulators very efficient and economical. We" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 53, + 298, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 53, + 298, + 207 + ], + "spans": [ + { + "bbox": [ + 53, + 53, + 298, + 207 + ], + "type": "text", + "content": "made use of the available information in a simple and elegant approach to make the controller learning process much more efficient by providing feedback on the exploration actions and choosing the best one among the action candidates. However, our real-world experiment results suffered from tracking errors, mainly due intervening dynamic factors (gripper sway, backlash, actuation inaccuracies) and poorly tuned low-level control. 
In contrast to these intervening factors and given the fact that our controller is trained only on 1500 data points sampled from the complete manipulator trajectory, the tracking accuracy is remarkable. The controller performance can be greatly improved, by training on more data points and providing a finely tuned low-level controller." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 209, + 298, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 209, + 298, + 363 + ], + "spans": [ + { + "bbox": [ + 53, + 209, + 298, + 363 + ], + "type": "text", + "content": "To address the problem of gripper sway, in future work we will extend our framework to integrate the sway motion during the learning process to model a compensating or aggressive control policy. We also plan to incorporate a generalized Long Short Term Memory (LSTM) based backlash model, to also take the backlash motion into account during training. Even though our feedback model facilitates the controller in an efficient exploration and learning, it still contains minor inaccuracies which might be affecting the learning process. A better feedback model will undoubtedly improve the controller performance. For more complex manipulation tasks we plan to use curriculum learning [20], which has been shown to accelerate and improve the learning process." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 146, + 373, + 204, + 383 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 373, + 204, + 383 + ], + "spans": [ + { + "bbox": [ + 146, + 373, + 204, + 383 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 390, + 299, + 734 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 56, + 390, + 299, + 427 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 390, + 299, + 427 + ], + "spans": [ + { + "bbox": [ + 56, + 390, + 299, + 427 + ], + "type": "text", + "content": "[1] P. La Hera and D. O. Morales, “What do we observe when we equip a forestry crane with motion sensors?” Croatian Journal of Forest Engineering: Journal for Theory and Application of Forestry Engineering, vol. 40, no. 2, pp. 259–280, 2019." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 427, + 299, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 427, + 299, + 472 + ], + "spans": [ + { + "bbox": [ + 56, + 427, + 299, + 472 + ], + "type": "text", + "content": "[2] P. L. Hera, U. Mettin, I. R. Manchester, and A. Shiriaev, \"Identification and control of a hydraulic forestry crane,\" IFAC Proceedings Volumes, vol. 41, no. 2, pp. 2306-2311, 2008, 17th IFAC World Congress. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1474667016392941" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 472, + 299, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 472, + 299, + 498 + ], + "spans": [ + { + "bbox": [ + 56, + 472, + 299, + 498 + ], + "type": "text", + "content": "[3] P. La Hera and D. Ortiz Morales, \"Model-based development of control systems for forestry cranes,\" Journal of Control Science and Engineering, vol. 2015, 2015." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 498, + 298, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 498, + 298, + 525 + ], + "spans": [ + { + "bbox": [ + 56, + 498, + 298, + 525 + ], + "type": "text", + "content": "[4] S. Fodor, C. Vázquez, and L. Freidovich, \"Automation of slewing motions for forestry cranes,\" in 2015 15th International Conference on Control, Automation and Systems (ICCAS), 2015, pp. 796-801." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 525, + 298, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 525, + 298, + 589 + ], + "spans": [ + { + "bbox": [ + 56, + 525, + 298, + 589 + ], + "type": "text", + "content": "[5] V. Mnih, K. Kavukcuoglu, D. Silver, A. A. Rusu, J. Veness, M. G. Bellemare, A. Graves, M. Riedmiller, A. K. Fidjeland, G. Ostrovski, S. Petersen, C. Beattie, A. Sadik, I. Antonoglou, H. King, D. Kumaran, D. Wierstra, S. Legg, and D. Hassabis, \"Human-level control through deep reinforcement learning,\" Nature, vol. 518, no. 7540, pp. 529-533, Feb 2015. [Online]. Available: https://doi.org/10.1038/nature14236" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 589, + 298, + 616 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 589, + 298, + 616 + ], + "spans": [ + { + "bbox": [ + 56, + 589, + 298, + 616 + ], + "type": "text", + "content": "[6] W. Dabney, M. Rowland, M. Bellemare, and R. Munos, \"Distributional reinforcement learning with quantile regression,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 32, no. 1, 2018." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 616, + 298, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 616, + 298, + 651 + ], + "spans": [ + { + "bbox": [ + 56, + 616, + 298, + 651 + ], + "type": "text", + "content": "[7] V. Mnih, A. P. Badia, M. Mirza, A. Graves, T. Lillicrap, T. Harley, D. 
Silver, and K. Kavukcuoglu, \"Asynchronous methods for deep reinforcement learning,\" in International conference on machine learning. PMLR, 2016, pp. 1928-1937." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 651, + 298, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 651, + 298, + 678 + ], + "spans": [ + { + "bbox": [ + 56, + 651, + 298, + 678 + ], + "type": "text", + "content": "[8] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov, “Proximal policy optimization algorithms,” arXiv preprint arXiv:1707.06347, 2017." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 678, + 298, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 678, + 298, + 734 + ], + "spans": [ + { + "bbox": [ + 56, + 678, + 298, + 734 + ], + "type": "text", + "content": "[9] J. Schulman, S. Levine, P. Abbeel, M. Jordan, and P. Moritz, \"Trust region policy optimization,\" in Proceedings of the 32nd International Conference on Machine Learning, ser. Proceedings of Machine Learning Research, F. Bach and D. Blei, Eds., vol. 37. Lille, France: PMLR, 07-09 Jul 2015, pp. 1889-1897. [Online]. Available: https://proceedings.mlr.press/v37/schulman15.html" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 312, + 53, + 558, + 324 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 312, + 53, + 558, + 81 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 53, + 558, + 81 + ], + "spans": [ + { + "bbox": [ + 312, + 53, + 558, + 81 + ], + "type": "text", + "content": "[10] P. Egli and M. Hutter, \"Towards rl-based hydraulic excavator automation,\" in 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2020, pp. 2692-2697." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 312, + 81, + 558, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 81, + 558, + 118 + ], + "spans": [ + { + "bbox": [ + 312, + 81, + 558, + 118 + ], + "type": "text", + "content": "[11] J. Andersson, K. Bodin, D. Lindmark, M. Servin, and E. Wallin, \"Reinforcement learning control of a forestry crane manipulator,\" in 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2021, pp. 2121-2126." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 312, + 118, + 558, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 118, + 558, + 144 + ], + "spans": [ + { + "bbox": [ + 312, + 118, + 558, + 144 + ], + "type": "text", + "content": "[12] H. Gietler, C. Stetco, and H. Zangl, \"Scalable retrofit angular position sensor system,\" in 2020 IEEE International Instrumentation and Measurement Technology Conference (I2MTC). IEEE, 2020, pp. 1-6." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 312, + 144, + 558, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 144, + 558, + 179 + ], + "spans": [ + { + "bbox": [ + 312, + 144, + 558, + 179 + ], + "type": "text", + "content": "[13] E. Rohmer, S. P. Singh, and M. Freese, \"V-rep: A versatile and scalable robot simulation framework,\" in 2013 IEEE/RSJ International Conference on Intelligent Robots and Systems. IEEE, 2013, pp. 1321-1326." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 312, + 179, + 558, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 179, + 558, + 198 + ], + "spans": [ + { + "bbox": [ + 312, + 179, + 558, + 198 + ], + "type": "text", + "content": "[14] E. Coumans et al., “Bullet real-time physics simulation,” URL http://bulletphysics.org, 2013." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 312, + 198, + 488, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 198, + 488, + 206 + ], + "spans": [ + { + "bbox": [ + 312, + 198, + 488, + 206 + ], + "type": "text", + "content": "[15] R. Smith et al., \"Open dynamics engine,\" 2007." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 312, + 206, + 473, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 206, + 473, + 215 + ], + "spans": [ + { + "bbox": [ + 312, + 206, + 473, + 215 + ], + "type": "text", + "content": "[16] CM-Labs, \"Vortex studio,\" CM Labs, 2020." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 312, + 215, + 558, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 215, + 558, + 233 + ], + "spans": [ + { + "bbox": [ + 312, + 215, + 558, + 233 + ], + "type": "text", + "content": "[17] J. Jerez and A. Suero, “Newton game dynamics,” Open Source Physics Engine, 2008." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 312, + 233, + 558, + 259 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 233, + 558, + 259 + ], + "spans": [ + { + "bbox": [ + 312, + 233, + 558, + 259 + ], + "type": "text", + "content": "[18] T. P. Lillicrap, J. J. Hunt, A. Pritzel, N. Heess, T. Erez, Y. Tassa, D. Silver, and D. Wierstra, \"Continuous control with deep reinforcement learning,\" arXiv preprint arXiv:1509.02971, 2015." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 312, + 259, + 558, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 259, + 558, + 296 + ], + "spans": [ + { + "bbox": [ + 312, + 259, + 558, + 296 + ], + "type": "text", + "content": "[19] P. Henderson, R. Islam, P. Bachman, J. Pineau, D. Precup, and D. Meger, “Deep reinforcement learning that matters,” CoRR, vol. abs/1709.06560, 2017. [Online]. 
Available: http://arxiv.org/abs/1709.06560" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 312, + 296, + 558, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 296, + 558, + 324 + ], + "spans": [ + { + "bbox": [ + 312, + 296, + 558, + 324 + ], + "type": "text", + "content": "[20] Y. Bengio, J. Louradour, R. Collobert, and J. Weston, “Curriculum learning,” in Proceedings of the 26th annual international conference on machine learning, 2009, pp. 41–48." + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15777/7d52c4ec-83bf-4780-930e-43bf666b3c1c_content_list.json b/data/2025/2504_15xxx/2504.15777/7d52c4ec-83bf-4780-930e-43bf666b3c1c_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..e66043564dc1440bd220940878d0bfa505c01808 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/7d52c4ec-83bf-4780-930e-43bf666b3c1c_content_list.json @@ -0,0 +1,3355 @@ +[ + { + "type": "text", + "text": "Tina: Tiny Reasoning Models via LoRA", + "text_level": 1, + "bbox": [ + 187, + 106, + 774, + 136 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shangshang Wang1, Julian Asilis1, Ömer Faruk Akgül1, Enes Burak Bilgin1, Ollie Liu1, and Willie Neiswanger1", + "bbox": [ + 197, + 148, + 761, + 185 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1University of Southern California", + "bbox": [ + 354, + 194, + 601, + 213 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "How cost-effectively can strong reasoning abilities be achieved in language models? Driven by this fundamental question, we present Tina, a family of tiny reasoning models achieved with high cost-efficiency. 
Notably, Tina demonstrates that substantial reasoning performance can be developed using only minimal resources, by applying parameter-efficient updates during reinforcement learning (RL), using low-rank adaptation (LoRA), to an already tiny 1.5B parameter base model. This minimalist approach produces models that achieve reasoning performance which is competitive with, and sometimes surpasses, SOTA RL reasoning models built upon the same base model. Crucially, this is achieved at a tiny fraction of the computational post-training cost employed by existing SOTA models. In fact, the best Tina model achieves a $>20\\%$ reasoning performance increase and $43.33\\%$ Pass@1 accuracy on AIME24, at only $9 USD post-training and evaluation cost (i.e., an estimated 260x cost reduction). Our work reveals the surprising effectiveness of efficient RL reasoning via LoRA. We validate this across multiple open-source reasoning datasets and various ablation settings starting with a single, fixed set of hyperparameters. Furthermore, we hypothesize that this effectiveness and efficiency stem from LoRA rapidly adapting the model to the structural format of reasoning rewarded by RL, while largely preserving the base model's underlying knowledge. In service of accessibility and open research, we fully open-source all code, training logs, and model weights & checkpoints.", + "bbox": [ + 109, + 229, + 885, + 478 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Notion Blog: https://shangshangwang.notion.site/tina", + "Code Repository: https://github.com/shangshang-wang/Tina", + "Training Logs: https://wandb.ai/upup-ashton-wang-usc/Tina", + "Model Weights & Checkpoints: https://huggingface.co/Tina-Yi" + ], + "bbox": [ + 112, + 494, + 594, + 561 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. 
Introduction", + "text_level": 1, + "bbox": [ + 84, + 606, + 240, + 625 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Language models (LMs) demonstrate increasing proficiency across a variety of tasks, but achieving robust, multi-step reasoning remains a frontier challenge (Wang and Neiswanger, 2025, Xu et al., 2025). Notably, such reasoning abilities are crucial for applications demanding complex problem-solving, from scientific discovery to intricate planning. Enhancing complex reasoning via supervised fine-tuning (SFT) is a well-adopted technique, often utilizing a distillation process (Min et al., 2024, Huang et al., 2024) by which the model learns to mimic reasoning traces (e.g., step-by-step thinking) generated by more advanced models such as o1 (OpenAI, 2024). This approach, while effective, relies upon the quality and availability of such expert demonstrations, which can be costly to obtain. Furthermore, it can run the risk of instilling a shallow form of imitation in the learning model, rather than fostering dynamic exploration of reasoning paths. In contrast, reinforcement learning (RL) enables models to learn directly and flexibly from verifiable reward signals derived from curated data (DeepSeek-AI, 2025, Lambert et al., 2025). In doing so, RL can lead the model to explore a greater variety of logical paths and possibly discover more robust solutions. However, RL pipelines are often complex and notoriously resource-intensive, typically involving substantial compute. 
This raises a fundamental question anchoring our research:", + "bbox": [ + 80, + 638, + 916, + 881 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "How cost-effectively can one perform RL to efficiently instill reasoning abilities in LMs?", + "bbox": [ + 174, + 896, + 826, + 914 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.15777v1 [cs.CL] 22 Apr 2025", + "bbox": [ + 22, + 263, + 60, + 708 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Corresponding author(s): Shangshang Wang shangshangwang.github.io; Willie Neiswanger neiswang@usc.edu", + "bbox": [ + 83, + 944, + 692, + 958 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/c58552ebaf2daf6eabfa675f030cd3a157abeb8ab14396bafc107c507f8547bd.jpg", + "image_caption": [ + "Figure 1: Overall comparison between Tina and baseline models. The Tina model in the figure corresponds to the best checkpoint in Table 10. Reasoning performance denotes the average score across AIME24/25, AMC23, MATH500, GPQA, and Minerva, as described in Section 3. The calculation of each comparative metric is detailed in Appendix A." 
+ ], + "image_footnote": [], + "bbox": [ + 93, + 108, + 295, + 251 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/bf53269452a3162272dd632626aa50af3f1678181b6f9d0ba4c95990a34126d1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 297, + 108, + 500, + 252 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/e7251e5046f2a5f6cb05b876b7fc39023bdfb9a1ab1a6d709a9ce612c7eaab5a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 108, + 702, + 252 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/6c6d44847f89dd7f528dfee2e033953d575eac2bf14e954d1c4d9a807378e5d6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 705, + 107, + 903, + 252 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our pursuit of this question necessitates a deliberate move towards minimalism. Rather than utilizing models with tens of billions of parameters (such as Qwen-7B/32B, QwQ-32B-preview, and their variants (Min et al., 2024, NovaSky Team, 2025, Zeng et al., 2025, Muennighoff et al., 2025, Cui et al., 2025, Lyu et al., 2025, OpenThoughts Team, 2025, Hu et al., 2025)), we instead direct our attention to tiny models. In particular, we use the 1.5B parameter model, DeepSeek-R1-Distill-Qwen-1.5B (DeepSeek-AI, 2025). Our choice of this base model aligns with common practices in recent research (RUCAIBox STILL Team, 2025, Luo et al., 2025, Dang and Ngo, 2025): we begin with a foundation that, owing to its specific lineage (DeepSeek/Qwen) and distillation process, likely possesses stronger initial reasoning aptitude compared to a generic pre-trained model of equivalent size. This strategic starting point allows us to more-rigorously evaluate the incremental reasoning enhancements imparted by RL, thereby isolating and measuring the effectiveness of the technique itself over a competent baseline. 
More importantly, selecting such an architecture dramatically lowers the computational and financial threshold for experimentation. Complementing the choice of a compact base model, we further amplify efficiency during the RL phase and integrate parameter-efficient post-training by employing low-rank adaptation (LoRA) (Hu et al., 2021). Notably, LoRA enables the modification of a model's behavior by training only an exceptionally small number of new parameters. This dovetails with our central motivation: achieving reasoning capabilities through the most economical means possible.", + "bbox": [ + 84, + 320, + 916, + 595 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Integrating the previous two components—a “tiny” model architecture and a “tiny” post-training via LoRA-based RL—we release the Tina (Tiny Reasoning Models via LoRA) family of models, which attain substantial reasoning performance at strikingly low cost. In total, we summarize our contributions as follows:", + "bbox": [ + 81, + 603, + 915, + 655 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Surprising Effectiveness of Efficient RL Reasoning. We show that our Tina models achieve performance competitive with, and in some cases even superior to, SOTA baseline models built on the same base model with full-parameter training, as shown in Figure 1 and in more detail in Table 3. In particular, the best Tina model achieves a $>20\\%$ performance increase and $43.33\\%$ Pass@1 accuracy on AIME24.", + "- Rapid Reasoning Format Adaptation Hypothesis. Based on our observations in post-training Tina, we hypothesize that LoRA's effectiveness and efficiency stem from rapidly adapting the reasoning format under RL while preserving base model knowledge—a likely more compute-efficient process than the deep knowledge integration of full-parameter training. 
Partial support comes from studies showing tiny LMs can reason effectively (Hugging Face, 2025, DeepSeek-AI, 2025), while large LMs can store broader world knowledge (Allen-Zhu and Li, 2025). This distinction suggests reasoning capabilities can be significantly enhanced by focusing on adapting the output format itself, consistent with our hypothesis about LoRA. To test this, we exclusively train LoRA parameters in RL settings, focusing on leveraging this format adaptation mechanism." + ], + "bbox": [ + 96, + 662, + 913, + 893 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 611, + 80 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 901, + 946, + 911, + 958 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/f7e24a2670d10d97ac1743803388f0f914b52e3adf5a7d8bb766e74bbc93cbba.jpg", + "image_caption": [ + "Figure 2: Release timeline of open-source models that aim to replicate the performance of advanced reasoning models like o1(-preview) (OpenAI, 2024) and R1 (DeepSeek-AI, 2025), which we refer to as open-source reasoning replicas." + ], + "image_footnote": [], + "bbox": [ + 102, + 104, + 893, + 344 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- Democratizing RL Reasoning. We provide a reproducible and highly cost-effective approach, enabling wider participation in the exploration of RL techniques without requiring extensive computational resources. Notably, the cost of reproducing the best Tina checkpoint stands at only $9, and of reproducing all our experiments and everything presented in this paper from scratch at$ 526. Furthermore, in line with our goal of promoting accessible research, we release all code, training logs, evaluation scripts, and all Tina checkpoints.", + "bbox": [ + 96, + 417, + 916, + 521 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2. 
Related Work", + "text_level": 1, + "bbox": [ + 83, + 544, + 250, + 561 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1. Open-Source Reasoning Replicas", + "text_level": 1, + "bbox": [ + 83, + 579, + 398, + 597 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As shown in Figure 2, following the release of o1-preview (OpenAI, 2024), a number of open-source models have emerged to replicate or exceed its reasoning capabilities. STILL (Min et al., 2024) introduced a minimal yet high-quality training recipe designed to elicit reasoning with modest compute, demonstrating that imitation learning from curated traces remains competitive. Sky-T1 (NovaSky Team, 2025) further explored scaling using open instruction-tuned checkpoints, while SimpleRL (Zeng et al., 2025) highlighted the potential of lightweight RL without requiring large-scale reward models. PRIME (Cui et al., 2025) and DeepScaleR (Luo et al., 2025) introduced process supervision and scaling experiments to isolate how reasoning quality evolves with model size and context length. s1 (Muennighoff et al., 2025) showed that even strong base models such as Qwen2.5-32B-Instruct benefit from fine-tuning on only 1k high-quality and long chain-of-thought data, which is curated to elicit reasoning capabilities. L1 (Aggarwal and Welleck, 2025) combined prompt engineering with data curation for RL, resulting in models that can efficiently and adaptively control their response length. Meanwhile, OREAL (Lyu et al., 2025) and OpenThinker (OpenThoughts Team, 2025) investigated self-correction and latent structure emergence through unsupervised and hybrid paradigms. 
The release of Open Reasoner Zero (Hu et al., 2025) and Open-RS (Dang and Ngo, 2025) further emphasized efficient RL-based strategies for reasoning with small models, completing a landscape of public alternatives for interpretability and reproducibility.", + "bbox": [ + 81, + 606, + 916, + 882 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 611, + 80 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 901, + 946, + 911, + 958 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. RL with Verifiable Rewards", + "text_level": 1, + "bbox": [ + 83, + 107, + 351, + 125 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Reasoning tasks are well-suited to RL paradigms, as the correctness or quality of the final output often provides verifiable reward signals (e.g., the validity of a logical deduction). Such signal can effectively guide the model towards learning more robust reasoning strategies. Consequently, various RL approaches have been explored within this domain. Certain methods introduce auxiliary reward models or critics to assess reasoning quality, such as ReFT (Luong et al., 2024) and REFINER (Paul et al., 2024). Other techniques employ explicit rule-based verification for self-correction (Wu et al., 2024). Some leverage self-play dynamics and exploration, such as mutual reasoning (Qi et al., 2024), or utilize inference-aware fine-tuning that optimizes performance under different sampling strategies (Chow et al., 2024). Notably, Group Relative Policy Optimization (GRPO) has been proposed as a variant of Proximal Policy Optimization (PPO) which removes the need for a separate value network by using a group-based baseline for advantage estimation, improving training efficiency and leading to better reward alignment (Shao et al., 2024), as demonstrated by DeepSeek-R1 (DeepSeek-AI, 2025). 
Subsequently, Dr.GRPO (Liu et al., 2025) introduced a subtle modification of GRPO addressing its bias to produce long responses. For completeness, we provide the standard formulation of GRPO in Appendix B.", + "bbox": [ + 81, + 135, + 916, + 359 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.3. Low-Rank Adaptation", + "text_level": 1, + "bbox": [ + 83, + 383, + 307, + 400 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "While most existing open models that enable reasoning rely on the more expensive full-parameter training (Min et al., 2024, NovaSky Team, 2025, Zeng et al., 2025, Muennighoff et al., 2025, Aggarwal and Welleck, 2025, Cui et al., 2025, Luo et al., 2025, Lyu et al., 2025, OpenThoughts Team, 2025, Hu et al., 2025, Dang and Ngo, 2025), we investigate the use of LoRA for parameter-efficient post-training of reasoning models (Hu et al., 2021). Our goal is to assess whether updating only a small fraction of parameters can still yield strong reasoning capabilities (Han et al., 2024). In addition to its computational efficiency, LoRA provides modularity: by training only a low-rank decomposition of the parameter updates, it becomes possible to toggle reasoning behavior without maintaining multiple full model copies. For completeness, we provide the standard formulation of LoRA in Appendix B.", + "bbox": [ + 81, + 411, + 918, + 566 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3. Tina: Tiny Reasoning Models via LoRA", + "text_level": 1, + "bbox": [ + 83, + 592, + 496, + 611 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Tina is our family of models created by post-training the DeepSeek-R1-Distill-Qwen-1.5B base model using LoRA during RL (employing a GRPO-style algorithm). The \"Tiny\" designation encapsulates a deliberate focus on minimalism and efficiency across the entire framework. 
This encompasses not only the tiny base model architecture and the tiny parameter updates enabled by LoRA, but also extends to a tiny overall resource footprint. This minimized footprint is achieved through an efficient training pipeline leveraging accessible open-source datasets and codebase (detailed in Section 3.1), and requires only minimal hardware and budget resources (described in Section 3.2).", + "bbox": [ + 81, + 625, + 916, + 747 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1. Training Pipeline: Baselines & Datasets", + "text_level": 1, + "bbox": [ + 83, + 770, + 454, + 787 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To facilitate meaningful comparisons and enable precise ablations, we post-train our Tina models via RL using the datasets and setups from publicly available reasoning models. All Tina and baseline models adopt DeepSeek-R1-Distill-Qwen-1.5B as their base model checkpoint with default open-source weights.", + "bbox": [ + 81, + 799, + 916, + 851 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "- STILL-3-1.5B-preview (RUCAIBox STILL Team, 2025) is a slow-thinking reasoning model developed through iterative RL on a curated dataset of $33\\mathrm{k}$ reasoning traces. The data originates from mathematics competitions and includes problems from MATH (Hendrycks et al., 2021, Lightman et al.,", + "bbox": [ + 96, + 862, + 916, + 915 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 611, + 80 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 901, + 948, + 911, + 958 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2023), NuminaMathCoT (LI et al., 2024), and AIME (1983-2023) (Art of Problem Solving, 2024). 
Tina-STILL-3-1.5B-preview uses the same dataset and reward pipeline.", + "bbox": [ + 112, + 107, + 915, + 142 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- DeepScaleR-1.5B-Preview (Luo et al., 2025) focuses on long-context mathematical reasoning via RL, and is trained over approximately 40k problem-answer pairs drawn from the AIME (Art of Problem Solving, 2024), AMC (Art of Problem Solving, 2023), OMNI-MATH (Gao et al., 2024a), and STILL (RUCAIBox STILL Team, 2025) datasets. Tina-DeepScaleR-1.5B-Preview uses this dataset and mirrors the reward design.", + "- Open-RS1/2/3 (Dang and Ngo, 2025) are three models from the Open-RS project exploring reasoning performance in 1.5B models trained via RL. All Open-RS models are trained on small, high-quality datasets further curated from the s1 (Muennighoff et al., 2025) (i.e., Open-S1) and DeepScaleR (Luo et al., 2025) (i.e., Open-DeepScaleR) datasets. The Tina models (Tina-Open-RS1/2/3) replicate these setups, using identical data splits and reward scaffolding." + ], + "bbox": [ + 96, + 148, + 916, + 328 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2. Training Setup: Infrastructure & Budget", + "text_level": 1, + "bbox": [ + 81, + 352, + 460, + 369 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training Codebase. Our implementation builds upon OpenR1, a fully open reproduction of DeepSeek-R1 (DeepSeek-AI, 2025) which combines the Accelerate (Gugger et al., 2022) and Trl (von Werra et al., 2020) libraries and the DeepSpeed ZeRO optimization (Rajbhandari et al., 2019). It aims to transparently replicate and extend RL methods used for improving reasoning in language models, particularly focusing on aligning model behavior with reasoning-oriented objectives via verifiable reward signals. 
Our methodology inherits its scaffolding, training utilities, and reward interfaces.", + "bbox": [ + 81, + 382, + 916, + 488 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training Hyperparameters. We initiated parameter selection by replicating key parameters from OpenR1 (Hugging Face, 2025) and OpenRS (Dang and Ngo, 2025). For all experiments presented in this paper, we deliberately adopted the default or recommended hyperparameter configurations provided in their works. These settings were kept largely fixed across different runs (Table 5). For the main Tina results (Section 4.2), only reward function parameters were adjusted per task, and for ablation studies (Section 4.3), only the specific factor under investigation (e.g., learning rate, LoRA rank/alpha, RL algorithm) was varied (Table 6). This approach intentionally circumvents costly hyperparameter search procedures for our specific setup, ensuring negligible tuning overhead and focusing on the efficacy of the core LoRA-based RL methodology.", + "bbox": [ + 81, + 494, + 928, + 633 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training Hardware. A key element of our low-cost approach was minimizing the hardware footprint. While distributed RL training algorithms like GRPO often benefit from using three or more GPUs (e.g., dedicating one GPU to an inference engine such as vLLM for faster sample generation), we deliberately targeted a minimal setup using only two NVIDIA L40S GPUs. To enable this, we co-located the RL training process and the vLLM on the same two GPUs by constraining vLLM's GPU memory usage. The training itself utilized data parallelism across both GPUs. 
While running inference and training concurrently on two GPUs might result in a longer wall-clock training time compared to a setup with dedicated inference GPUs, it significantly reduces the hardware requirement.", + "bbox": [ + 81, + 640, + 916, + 777 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Training Budget. The NVIDIA L40S GPUs we use are accessible via commercial cloud platforms at an approximate rate of \\(1 USD per GPU hour, including 300 GB storage, based on pricing observed at the time of writing (Cudo Compute). The RL training process for our LoRA models proved highly efficient, with a", + "bbox": [ + 81, + 785, + 915, + 838 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 383, + 68, + 611, + 80 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "1https://github.com/huggingface/open-r1", + "bbox": [ + 101, + 847, + 437, + 864 + ], + "page_idx": 4 + }, + { + "type": "page_footnote", + "text": "2Occasionally, NVIDIA RTX 6000 Ada GPUs were used instead, which is reflected in the system configuration metadata on Weights & Biases. From our practical experience, these two GPU types are similar in terms of cost and computational performance. For consistency, we report costs and compute metrics based on the L40S.", + "bbox": [ + 81, + 864, + 915, + 910 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 901, + 946, + 911, + 958 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/a9ee0ea5f785ef391938e14609d9a8376e65fd1348f8d7554bc57edaae2fac3a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
EXPERIMENTAL TASKTRAINING COST EST.EVALUATION COST EST.TOTAL COST EST.
Baseline: Model Re-Evaluation-$6$6
Main: Tina-STILL-3-1.5B-preview$59$7$66
Main: Tina-DeepScaleR-1.5B-Preview$84$10$94
Main: Tina-Open-RS1$40$11$51
Main: Tina-Open-RS2$15$17$32
Main: Tina-Open-RS3$15$17$32
Ablation: OpenThoughts Dataset$84$10$94
Ablation: OpenR1 Dataset$59$7$66
Ablation: LIMR Dataset$4$4$8
Ablation: DrGRPO Algorithm$15$17$32
Ablation: Learning Rate$7$8$15
Ablation: LoRA Rank/Alpha$14$16$30
Total: All Tasks$396$130$526
Total: Main Tasks$213$62$275
Total: Best Ckpt. in Each Main Task$80$5$85
Total: All Ckpt. in Best-Performance Task$14$17$31
Total: Best Ckpt. in Best-Performance Task$8$1$9
", + "bbox": [ + 107, + 104, + 890, + 458 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1: Computational cost breakdown. Costs for all experimental tasks in this paper, measured in USD. The row \"Best Ckpt. in Each Main Task\" denotes the cost of reproducing the best checkpoint in each of Table 7, 8, 9, 10, 11. The row \"All Ckpt. in Best-Performance Task\" denotes the cost of reproducing all checkpoints in Table 10. \"Best Ckpt. in Best-Performance Task\" denotes the cost of reproducing the best checkpoint in Table 10, i.e., the checkpoint at step 450.", + "bbox": [ + 81, + 467, + 915, + 530 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "single RL step typically completing within one minute on this hardware. Evaluating a model checkpoint across our entire suite of six reasoning benchmarks required approximately 1 L40S GPU hours on average. To ensure cost control, we initially established a conservative maximum budget of \\(100 USD for each complete experimental run, encompassing all stages from training to evaluation and miscellaneous tasks. As detailed in Table 1, our actual expenditures were significantly below this ceiling. Our calculation is based on the full Tina model evaluation performance in Appendix D. We believe this low cost makes our setup an accessible testbed for the research community.", + "bbox": [ + 81, + 544, + 916, + 666 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4. Surprising Effectiveness of Efficient RL Reasoning via LoRA", + "text_level": 1, + "bbox": [ + 81, + 684, + 694, + 705 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1. Experiments Stage I: Baseline Model Re-Evaluation", + "text_level": 1, + "bbox": [ + 81, + 719, + 552, + 738 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Before presenting Tina's performance, it is crucial to establish fair and reliable comparisons against existing SOTA reasoning models. 
We note that performance scores reported in the literature for relevant models often stem from evaluations using disparate frameworks (e.g., verl (Sheng et al., 2025), lighteval (Fourrier et al., 2023), lm-eval-harness (Gao et al., 2024b)) and inconsistent inference settings (such as different generation hyperparameters or varying numbers of GPUs). These variations can significantly influence reported metrics, creating potential inconsistencies and hindering reliable comparisons between models.", + "bbox": [ + 81, + 747, + 916, + 852 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To mitigate these confounding factors, we performed a comprehensive re-evaluation of key baseline models using a single, consistent methodology throughout this paper. All baseline evaluations reported herein utilize the lighteval framework integrated with the vLLM (Kwon et al., 2023) inference engine for efficient", + "bbox": [ + 81, + 858, + 915, + 912 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 611, + 80 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 901, + 946, + 911, + 958 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/56c0a1d34fc3c6892dc823d2eada4025b45a291b64ab9e854d5acd3cf67be71c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
BASELINE MODELAIME24AIME25AMC23MATH500GPQAMINervaAvg.
DeepSeek-R1-Distill-Qwen-1.5B23.3316.6762.5082.6031.8230.1541.18
STILL-3-1.5B-preview26.6726.6767.5086.4034.3427.5744.86
DeepScaleR-1.5B-Preview36.6726.6777.5087.8031.8231.9948.74
Open-RS126.6720.0072.5083.6035.3528.6844.47
Open-RS226.6713.3362.5085.4034.8526.8441.60
Open-RS343.3320.0067.5083.0033.8428.6846.06
", + "bbox": [ + 129, + 104, + 869, + 243 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2: Baseline model re-evaluation. Performance evaluation of baseline models on six reasoning tasks.", + "bbox": [ + 81, + 253, + 844, + 268 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "generation. For comparability with prior work such as OpenR1, we maintained a fixed hardware configuration (two L40S GPUs) and applied a standardized set of vLLM inference parameters across all evaluated baseline models. All scores are zero-shot pass@1 performance. The exact command structure employed for these evaluations is provided in Appendix C.2 for transparency and reproducibility. The results stemming from this consistent re-evaluation protocol are presented in Table 2.", + "bbox": [ + 81, + 285, + 913, + 371 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Particularly, we evaluate the reasoning capabilities of our Tina models and the baselines across a diverse suite of six challenging benchmarks, primarily focused on mathematical and scientific reasoning:", + "bbox": [ + 81, + 378, + 913, + 414 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- AIME24/25 (Art of Problem Solving, 2024) contains 30 high-school-level math problems in algebra, geometry, number theory, and combinatorics from the 2024/2025 American Invitational Mathematics Examination. 
Each problem demands precise multi-step reasoning.", + "- AMC23 (Art of Problem Solving, 2023) includes 40 problems from the 2023 American Mathematics Competition, offering a mix of logic and symbolic manipulation tasks.", + "- MATH500 (Hendrycks et al., 2021, Lightman et al., 2023) is a benchmark comprising 500 competition mathematics problems derived from various sources, covering different difficulty levels and often necessitating multi-step derivation and calculation.", + "- GPQA Diamond (Rein et al., 2024), hereafter referred to as GPQA, consists of 198 PhD-level science questions across biology, chemistry, and physics. Each question is multiple-choice with subtle distractors.", + "- Minerva (Lewkowycz et al., 2022) includes 272 quantitative reasoning problems generally at the undergraduate level. The questions span multiple STEM fields, including physics, biology, chemistry, and economics, often requiring mathematical modeling or calculation steps. Includes tasks such as calculating enzyme kinetics from reaction data." + ], + "bbox": [ + 96, + 421, + 913, + 690 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2. Experiments Stage II: Tina Model Evaluation", + "text_level": 1, + "bbox": [ + 81, + 709, + 496, + 724 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We now present the core evaluation results for our Tina models. These experiments assess the reasoning capabilities attained by post-training the DeepSeek-R1-Distill-Qwen-1.5B with minimal parameter updates via LoRA-based RL. 
The results presented in Table 3 demonstrate that significant reasoning performance can be achieved efficiently, yielding models that are competitive with, or outperform, relevant baselines despite the inherent resource constraints of using parameter-efficient tuning.3", + "bbox": [ + 81, + 736, + 913, + 823 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3 summarizes the performance of five distinct Tina models across a suite of six reasoning tasks:", + "bbox": [ + 81, + 830, + 913, + 848 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 611, + 80 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "3Tables 3 and 4 adopt a consistent naming pattern where \"Tina-X\" denotes our model is the LoRA counterpart of a baseline model X or is trained on a dataset X (possibly followed with an extra ablation setup). This can reflect the model origin and serve as a direct reference to the public checkpoints for reproducibility.", + "bbox": [ + 81, + 858, + 913, + 902 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 901, + 946, + 911, + 958 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/4e7e960fb8433289bbb35bd1a4c84458c12331f5c0c13f4f336813af762fbaef.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
TINA MODELSTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.BASELINE
Tina-STILL-3-1.5B-preview53%36.6730.0077.5084.6033.3326.8448.1644.86
Tina-DeepScaleR-1.5B-Preview19%43.3326.6767.5086.2037.8828.6848.3848.74
Tina-Open-RS134%43.3320.0080.0084.0035.3528.6848.5644.47
Tina-Open-RS251%43.3326.6777.5087.0036.3632.7250.6041.60
Tina-Open-RS357%36.6723.3382.5085.2037.3731.6249.4546.06
", + "bbox": [ + 84, + 104, + 911, + 204 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 3: Tina model evaluation. Performance comparison between Tina models and corresponding full-parameter-trained SOTA models on six reasoning tasks. The value in the Steps column indicates the training steps of the best model checkpoint within one epoch, the full model checkpoint evaluation is shown in Appendix D. The Baseline column represents the average score achieved by baseline model with full-parameter RL in Table 2.", + "bbox": [ + 84, + 214, + 913, + 273 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "AIME24/25, AMC23, MATH500, GPQA, and Minerva. For each Tina model, we report the extent of training completed (as a percentage of a predefined training stpes within 1 epoch) and the percentage scores achieved on each task. The results compellingly demonstrate the efficacy of our economical LoRA-based RL strategy. All Tina models exhibit substantial reasoning aptitude, achieving average scores in the range of $48.16\\%$ to $50.60\\%$ . Significantly, nearly all Tina models notably outperform their corresponding baseline average scores, indicating marked improvements instilled by the parameter-efficient RL. The Tina-Open-RS2 model yielded the highest average performance observed at $50.60\\%$ . Furthermore, these strong results were achieved with remarkably limited training durations, ranging from just $19\\%$ to $57\\%$ of a full training epoch, highlighting the efficiency and rapid adaptation enabled by the Tina approach. These findings strongly support our central hypothesis: robust reasoning capabilities can be effectively and economically cultivated in small language models through the targeted application of LoRA and RL.", + "bbox": [ + 84, + 301, + 913, + 489 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3. 
Experiments Stage III: Tina Ablation Variants", + "text_level": 1, + "bbox": [ + 84, + 507, + 501, + 523 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To better understand the factors influencing the performance and efficiency of our Tina models within the proposed low-cost framework, we conducted a series of ablation studies. These studies systematically investigate the impact of key design choices and hyperparameter: the underlying training dataset, the learning rate for LoRA updates, the rank of the LoRA adapters, and the specific RL algorithm employed. In each study, we typically varied one factor while holding others constant, often based on a high-performing configuration identified in our main experiments or preliminary runs. The results, summarized in Table 4, provide valuable insights into the robustness and sensitivity of our economical approach.", + "bbox": [ + 84, + 535, + 913, + 655 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Impact of Training Dataset. The first section of Table 4 highlights the influence of the dataset used for RL. We compared seven distinct datasets, varying significantly in size (from $\\approx 1.4\\mathrm{k}$ to $\\approx 94\\mathrm{k}$ samples). Strikingly, the Tina-0pen-RS model, trained on a concise dataset of merely 7k examples, achieved the highest average score (50.60%). This outcome surpasses models trained on considerably larger datasets, such as Tina-0penR1 (93.7k samples, 49.26% avg). This observation strongly supports our core \"Tiny\" premise and reflects the intuition that the quality and diversity of the dataset matter more than the data size.", + "bbox": [ + 84, + 664, + 913, + 767 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Sensitivity to Learning Rate. Using the Tina-LIMR configuration as a testbed (second section of Table 4), we assessed sensitivity to the learning rate. 
Among the tested values $(5 \\times 10^{-6}, 1 \\times 10^{-6}$ , and $5 \\times 10^{-7}$ ), a learning rate of $1 \\times 10^{-6}$ yielded the optimal average performance $(48.47\\%)$ for this setup. While performance differences were not drastic, this indicates that learning rate selection remains a factor, although effective results were obtained without extensive tuning.", + "bbox": [ + 84, + 775, + 913, + 859 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Effect of LoRA Rank. The third ablation study investigated the impact of LoRA rank, which directly controls the number of trainable parameters. Testing ranks 4, 8, 16, 32, and 64 on the Tina-LIMR setup, we observed", + "bbox": [ + 84, + 869, + 911, + 902 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 387, + 68, + 609, + 80 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 901, + 948, + 911, + 955 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/7b908d3846cc3b7d51a45a7110eb4424a76ec798139377eb728451a8b9a8038e.jpg", + "table_caption": [], + "table_footnote": [ + "Table 4: Tina ablation variants evaluation. Performance evaluation of Tina's ablation variants on six reasoning tasks. The value in the Steps column indicates the training steps of the best model checkpoint within one epoch, the full model checkpoint evaluation is shown in Appendix D. For the number in parentheses (the ablation on datasets), it means the data size of a dataset. During training, this number should be multiplied by the number of generation in GRPO-like algorithm (in our case, that multiplier is 4). For the model names, Tina-LIMR, Tina-LIMR-1e-6-1r and Tina-LIMR-32-LoRA-rank are the same model, we duplicate them for better visualization. 
The same idea applies to Tina-DeepScaleR and Tina-DeepScaleR-1.5B-Preview, Tina-STILL-3 and Tina-STILL-3-1.5B-preview, Tina-Open-S1 and Tina-Open-RS1, Tina-Open-RS and Tina-Open-RS2, Tina-Open-RS3-GRPO and Tina-Open-RS3." + ], + "table_body": "
ABLATION ON DATASETSSTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
Tina-OpenR1 (93.7k)13%36.6726.6775.0086.8039.9030.5149.26
Tina-OpenThoughts (66.1k)30%36.6726.6772.5084.8041.4133.0949.19
Tina-DeepScaleR (40.3k)19%43.3326.6767.5086.2037.8828.6848.38
Tina-STILL-3 (33k)53%36.6730.0077.5084.6033.3326.8448.16
Tina-Open-S1 (18.6k)34%43.3320.0080.0084.0035.3528.6848.56
Tina-Open-RS (7k)51%43.3326.6777.5087.0036.3632.7250.60
Tina-LIMR (1.39k)58%46.6720.0075.0083.8034.8530.5148.47
ABLATION ON LEARNING RATESTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
Tina-LIMR-5e-6-lr29%36.6726.6775.0083.6035.8629.4147.87
Tina-LIMR-1e-6-lr58%46.6720.0075.0083.8034.8530.5148.47
Tina-LIMR-5e-7-lr58%43.3316.6777.5084.6034.8530.5147.91
ABLATION ON LORA RANKSTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
Tina-LIMR-64-LoRA-rank29%20.0030.0077.5084.2038.3831.6246.95
Tina-LIMR-32-LoRA-rank58%46.6720.0075.0083.8034.8530.5148.47
Tina-LIMR-16-LoRA-rank58%43.3333.3370.0083.2035.3528.3148.92
Tina-LIMR-8-LoRA-rank29%30.0026.6782.5083.8033.8430.5147.89
Tina-LIMR-4-LoRA-rank86%36.6720.0085.0083.8031.8229.0447.72
ABLATION ON RL ALGORITHMSTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
Tina-Open-RS3-GRPO57%36.6723.3382.5085.2037.3731.6249.45
Tina-Open-RS3-DrGRPO17%43.3323.3380.0085.0035.3530.1549.53
", + "bbox": [ + 84, + 104, + 911, + 473 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "considerable robustness. Ranks 8, 16, and 32 all produced strong results, with average scores clustering between $47.89\\%$ and $48.92\\%$ . Notably, rank 16 achieved the peak performance $(48.92\\%)$ in this comparison, slightly outperforming rank 32 $(48.47\\%)$ . Performance decreased slightly at the extremes (rank 4 and 64). This study validates that highly parameter-efficient configurations (low ranks like 16 or 32) are effective, further enhancing the cost-effectiveness and minimal overhead of the Tina approach.", + "bbox": [ + 81, + 635, + 916, + 722 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Comparison of RL Algorithms. Finally, we compared two RL algorithms, GRPO and Dr.GRPO (Liu et al., 2025), using the Tina-Open-RS3 setup (final section of Table 4). Both algorithms led to similar peak average performance levels (49.45% for GRPO vs. 49.53% for Dr.GRPO). However, Dr.GRPO reached its best checkpoint significantly earlier in the training process (17% of an epoch vs. 57% for GRPO). This suggests potential advantages in sample efficiency for Dr.GRPO in this context with an alternative normalization in loss calculation, offering potentially faster convergence and further reductions in training time and cost.", + "bbox": [ + 81, + 729, + 916, + 833 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 609, + 80 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 901, + 946, + 911, + 958 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5. Hypothesis for Effective and Efficient LoRA: Rapid Format Adaptation", + "text_level": 1, + "bbox": [ + 86, + 104, + 795, + 125 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Less is More LoRA-based RL. 
To understand why LoRA facilitates both effective and efficient reasoning improvements via RL, we analyze the relationship between training compute and performance, alongside training dynamics. As illustrated in Figure 3, plotting reasoning performance against approximate training FLOPs reveals a stark contrast between full-parameter and LoRA-based training regimes. First, our LoRA-based Tina models achieve reasoning scores comparable or superior to fully fine-tuned baselines while requiring (in some cases) orders of magnitude fewer training FLOPs. We observe that in LoRA models, increased training compute inversely affects performance, in contrast to full-parameter models. This observation highlights a \"less compute can yield more performance\" phenomenon.", + "bbox": [ + 86, + 138, + 913, + 275 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/1a3b1780e0c1635d42f9fb927eb80189e6d0502e9d868b337a308db1217364ff.jpg", + "image_caption": [ + "Figure 3: Less is more LoRA-based RL. Approximate training FLOPs vs reasoning performance comparison between Tina and baseline models. The calculation is detailed in Appendix A." + ], + "image_footnote": [], + "bbox": [ + 86, + 291, + 908, + 518 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "This finding supports our hypothesis regarding how LoRA achieves such remarkable efficiency, which relates to the principle of \"learn structure/format, maintain knowledge.\" We posit that LoRA excels in this scenario because RL for reasoning heavily rewards the model's ability to generate outputs in a specific, verifiable format or structure (e.g., step-by-step reasoning chains). LoRA appears to be highly adept at learning these structural and stylistic patterns with minimal parameter changes, thus requiring very few FLOPs. At the same time, because LoRA modifies only a tiny fraction of the weights, it largely preserves the base model's vast pre-trained knowledge. 
Therefore, LoRA efficiently teaches the model how to format its existing knowledge into effective reasoning traces, rather than potentially imposing costly relearning of concepts or procedures that extensive full-parameter updates might entail. We hypothesize that this focus on structural adaptation allows Tina to achieve high reasoning performance with minimal computational investment.", + "bbox": [ + 86, + 579, + 913, + 750 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Phase Transition in LoRA-based RL. Further insights into the LoRA-based RL mechanism arise from analyzing the training logs. That is, a distinct pattern emerges in Figure 4, which displays accuracy rewards, format rewards, and completion lengths over training steps for various Tina model runs. We consistently observe a training phase transition or turning point evident in the format-related metrics (format reward, row 2; completion length, row 3) across most Tina models. Around this transition point (indicated by the green vertical dashed line), the format reward often peaks or destabilizes, while the completion length frequently reaches a minimum before potentially reversing its trend. Notably, this relatively sharp transition observed in format and length metrics does not typically have a corresponding distinct turning point in the accuracy reward plots (row 1). 
The accuracy reward often exhibits more gradual fluctuations or slower drift over the", + "bbox": [ + 86, + 758, + 913, + 912 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 387, + 68, + 611, + 80 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 895, + 948, + 911, + 955 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "training duration, without a clear inflection aligned with the format transition.", + "bbox": [ + 81, + 107, + 694, + 125 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/d40c018fc96b24189e82fe7ab40c407172146c7f87651959576d69ef64fe1e94.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 124, + 138, + 491, + 303 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/59fd125d2f1a13bd4fccfe6713425a2c004cd392986e4c427eb467173796bb57.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 137, + 872, + 303 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/a927aeddf98e9e4ddcd83a2c9de8d7f68786d344059baf05973d53f2a46f7f94.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 124, + 306, + 491, + 470 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/a253375ed3cfaee7d217e3d0f9b8bbed5a508724cc1b432063d0e1b796a0d874.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 306, + 872, + 470 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/a680baf93b2b90175aa50f3beb3cff201d9162fda811c1a9da2a5e11491e4d83.jpg", + "image_caption": [ + "Figure 4: Phase transition in LoRA-based RL. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1. The \"training turning point\" in the legend means the step where the format-like metrics (e.g., format reward, completion length) start to destabilize. 
Refer to Appendix E for the full set of plots." + ], + "image_footnote": [], + "bbox": [ + 125, + 474, + 491, + 638 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/d641c625095868eb4819266432c0c7a072fc31c2f491c3d79d392f1711d5b00e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 474, + 872, + 638 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Another crucial observation is the timing of optimal performance: the best-performing checkpoint, yielding the highest reasoning accuracy on held-out evaluations, consistently occurs just prior to or around this observed phase transition point in the format metrics (indicated by the red vertical dashed line). This decoupling between the dynamics of accuracy-based and format-based metrics suggests that the LoRA-based RL process rapidly optimizes the model's ability to adhere to the structural and stylistic elements rewarded by the format score and length constraints. The subsequent transition point may signify where this structural optimization saturates, becomes unstable, or perhaps begins to compromise generative quality in other ways (e.g., by overly constraining or expanding length). The fact that peak reasoning accuracy is achieved just before this format-driven transition implies that while learning the correct output format is essential and efficiently achieved via LoRA, pushing further on format-centric optimization alone does not necessarily", + "bbox": [ + 81, + 731, + 916, + 905 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 611, + 80 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 895, + 946, + 911, + 958 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "yield better reasoning, and may even be detrimental. 
This reinforces our hypothesis that LoRA efficiently adapts the model by primarily learning the form required for effective reasoning.", + "bbox": [ + 84, + 107, + 911, + 141 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 84, + 169, + 223, + 186 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We presented Tina to demonstrate that effective reasoning capabilities can be instilled in language models with efficiency and effectiveness. The principal contribution of Tina lies in democratizing access to RL-driven reasoning model development. By combining LoRA with RL on a 1.5B parameter base model, we achieved reasoning performance competitive with significantly larger models, accomplishing this within an estimated computational budget of only $9. This outcome prompts reflection on the factors enabling such minimalist approaches, and on their possible future trajectories. Despite encouraging results, this work is subject to certain limitations:", + "bbox": [ + 84, + 202, + 911, + 321 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Base Model Scale: Our experiments centered on a 1.5B parameter model. While showcasing cost-performance efficiency, the absolute reasoning ceiling achievable with this \"tiny\" model may naturally be lower for complex, multi-step reasoning problems than what larger models can offer.", + "bbox": [ + 84, + 330, + 911, + 380 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Reasoning Task Scope: Our evaluation focused primarily on mathematical and formal logic reasoning benchmarks (AIME, AMC, MATH, GPQA, Minerva). 
The effectiveness and transferability of the learned reasoning skills to other domains, such as coding, warrants further investigation.", + "bbox": [ + 84, + 390, + 911, + 441 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Hyperparameter Optimization: We intentionally minimized hyperparameter tuning costs by adopting established configurations. While this demonstrates a certain form of robustness to our methodology, there may be potential for further performance gains derived from additional tuning, perhaps tailored to the interplay between LoRA, the RL algorithm, and the target reasoning tasks.", + "bbox": [ + 84, + 450, + 911, + 518 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "7. Acknowledgment", + "text_level": 1, + "bbox": [ + 84, + 546, + 285, + 564 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We want to express our gratitude to the broader open-source community. This research was made possible by leveraging numerous publicly available resources, including training and evaluation framework, open datasets, accessible pre-trained language models, and the insights shared through technical reports. The computational resources required for the experiments described herein were provided by the Center for Advanced Research Computing (CARC) at the University of Southern California (USC). We are grateful for the support which enabled the training and evaluation of our models. J.A. was supported by the National Science Foundation Graduate Research Fellowship Program under Grant No. DGE-1842487. 
Any opinions, findings, and conclusions or recommendations expressed in this material are those of the authors and do not necessarily reflect the views of the National Science Foundation.", + "bbox": [ + 84, + 579, + 911, + 732 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 387, + 68, + 611, + 80 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 895, + 946, + 911, + 955 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 84, + 106, + 194, + 122 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Pranjal Aggarwal and Sean Welleck. L1: Controlling how long a reasoning model thinks with reinforcement learning, 2025. URL https://arxiv.org/abs/2503.04697.", + "Zeyuan Allen-Zhu and Yuanzhi Li. Physics of language models: Part 3.3, knowledge capacity scaling laws. In Proceedings of International Conference on Learning Representations (ICLR), 2025.", + "Art of Problem Solving. Amc problems and solutions, 2023. URL https://artofproblemsolving.com/wiki/index.php/AMC_12_Problems_and_Solutions.", + "Art of Problem Solving. Aime problems and solutions, February 2024. URL https://artofproblemsolving.com/wiki/index.php/AIME_Problems_and_Solutions.", + "Yinlam Chow, Guy Tennenholtz, Izzeddin Gur, Vincent Zhuang, Bo Dai, Sridhar Thiagarajan, Craig Boutilier, Rishabh Agarwal, Aviral Kumar, and Aleksandra Faust. Inference-aware fine-tuning for Best-of-N sampling in large language models, 2024. URL https://arxiv.org/abs/2412.15287.", + "Cudo Compute. Nvidia L40S pricing. URL https://www.cudocompute.com/products/gpu-cloud/nvidia-l40s. 
Accessed: 2025-04-21.", + "Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, Jiarui Yuan, Huayu Chen, Kaiyan Zhang, Xingtai Lv, Shuo Wang, Yuan Yao, Xu Han, Hao Peng, Yu Cheng, Zhiyuan Liu, Maosong Sun, Bowen Zhou, and Ning Ding. Process reinforcement through implicit rewards, 2025. URL https://arxiv.org/abs/2502.01456.", + "Quy-Anh Dang and Chris Ngo. Reinforcement learning for reasoning in small llms: What works and what doesn't, 2025. URL https://arxiv.org/abs/2503.16219.", + "DeepSeek-AI. DeepSeek-R1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948.", + "Clémentine Fourrier, Nathan Habib, Hynek Kydlíček, Thomas Wolf, and Lewis Tunstall. Lighteval: A lightweight framework for llm evaluation, 2023. URL https://github.com/huggingface/lighteval.", + "Bofei Gao, Feifan Song, Zhe Yang, Zefan Cai, Yibo Miao, Qingxiu Dong, Lei Li, Chenghao Ma, Liang Chen, Runxin Xu, Zhengyang Tang, Benyou Wang, Daoguang Zan, Shanghaoran Quan, Ge Zhang, Lei Sha, Yichang Zhang, Xuancheng Ren, Tianyu Liu, and Baobao Chang. Omni-MATH: A universal olympiad level mathematic benchmark for large language models, 2024a. URL https://arxiv.org/abs/2410.07985.", + "Leo Gao, Jonathan Tow, Baber Abbasi, Stella Biderman, Sid Black, Anthony DiPofi, Charles Foster, Laurence Golding, Jeffrey Hsu, Alain Le Noac'h, Haonan Li, Kyle McDonell, Niklas Muennighoff, Chris Ociepa, Jason Phang, Laria Reynolds, Hailey Schoelkopf, Aviya Skowron, Lintang Sutawika, Eric Tang, Anish Thite, Ben Wang, Kevin Wang, and Andy Zou. A framework for few-shot language model evaluation, 07 2024b. URL https://zenodo.org/records/12608602.", + "Sylvain Gugger, Lysandre Debut, Thomas Wolf, Philipp Schmid, Zachary Mueller, Sourab Mangrulkar, Marc Sun, and Benjamin Bossan. Accelerate: Training and inference at scale made simple, efficient and adaptable., 2022. 
URL https://github.com/huggingface/accelerate." + ], + "bbox": [ + 83, + 138, + 915, + 907 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 383, + 68, + 611, + 80 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 895, + 946, + 911, + 958 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Zeyu Han, Chao Gao, Jinyang Liu, Jeff Zhang, and Sai Qian Zhang. Parameter-efficient fine-tuning for large models: A comprehensive survey, 2024. URL https://arxiv.org/abs/2403.14608.", + "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset, 2021. URL https://arxiv.org/abs/2103.03874.", + "Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuzhhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models, 2021. URL https://arxiv.org/abs/2106.09685.", + "Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, and Heung-Yeung Shum Xiangyu Zhang. Open-Reasoner-Zero: An open source approach to scaling reinforcement learning on the base model, 2025. URL https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero.", + "Zhen Huang, Haoyang Zou, Xuefeng Li, Yixiu Liu, Yuxiang Zheng, Ethan Chern, Shijie Xia, Yiwei Qin, Weizhe Yuan, and Pengfei Liu. O1 replication journey - part 2: Surpassing o1-preview through simple distillation, big progress or bitter lesson?, 2024. URL https://arxiv.org/abs/2411.16489.", + "Hugging Face. Open r1: A fully open reproduction of deepseek-r1, January 2025. URL https://github.com/huggingface/open-r1.", + "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with pagedattention. 
In Proceedings of Symposium on Operating Systems Principles (SOSP), 2023.", + "Nathan Lambert, Jacob Morrison, Valentina Pyatkin, Shengyi Huang, Hamish Ivison, Faeze Brahman, Lester James V. Miranda, Alisa Liu, Nouha Dziri, Shane Lyu, Yuling Gu, Saumya Malik, Victoria Graf, Jena D. Hwang, Jiangjiang Yang, Ronan Le Bras, Oyvind Tafjord, Chris Wilhelm, Luca Soldaini, Noah A. Smith, Yizhong Wang, Pradeep Dasigi, and Hannaneh Hajishirzi. Tulu 3: Pushing frontiers in open language model post-training, 2025. URL https://arxiv.org/abs/2411.15124.", + "Aitor Lewkowycz, Anders Andreassen, David Dohan, Ethan Dyer, Henryk Michalewski, Vinay Ramasesh, Ambrose Slone, Cem Anil, Imanol Schlag, Theo Gutman-Solo, Yuhuai Wu, Behnam Neyshabur, Guy Gur-Ari, and Vedant Misra. Solving quantitative reasoning problems with language models. In Proceedings of Advances in Neural Information Processing Systems (NeurIPS), volume 35, pages 3843-3857, 2022.", + "Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. NuminaMath, 2024. URL https://huggingface.co/AI-MO/NuminaMath-CoT.", + "Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In Proceedings of International Conference on Learning Representations (ICLR), 2023.", + "Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Understanding r1-zero-like training: A critical perspective, 2025. URL https://arxiv.org/abs/2503.20783.", + "Michael Luo, Sijun Tan, Justin Wong, Xiaoxiang Shi, William Y. Tang, Manan Roongta, Colin Cai, Jeffrey Luo, Tianjun Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. DeepScaleR: Surpassing o1-preview with a 1.5b model by scaling rl, 2025. URL https://agentica-project.com/." 
+ ], + "bbox": [ + 84, + 107, + 915, + 914 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 383, + 68, + 611, + 80 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 895, + 948, + 911, + 957 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Trung Quoc Luong, Xinbo Zhang, Zhanming Jie, Peng Sun, Xiaoran Jin, and Hang Li. ReFT: Reasoning with reinforced fine-tuning, 2024. URL https://arxiv.org/abs/2401.08967.", + "Chengqi Lyu, Songyang Gao, Yuzhe Gu, Wenwei Zhang, Jianfei Gao, Kuikun Liu, Ziyi Wang, Shuaibin Li, Qian Zhao, Haian Huang, Weihan Cao, Jiangning Liu, Hongwei Liu, Junnan Liu, Songyang Zhang, Dahua Lin, and Kai Chen. Exploring the limit of outcome reward for learning mathematical reasoning, 2025. URL https://arxiv.org/abs/2502.06781.", + "Sourab Mangrulkar, Sylvain Gugger, Lysandre Debut, Younes Belkada, Sayak Paul, and Benjamin Bossan. PEFT: State-of-the-art parameter-efficient fine-tuning methods, 2022. URL https://github.com/huggingface/peft.", + "Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, Wayne Xin Zhao, Zheng Liu, Zhongyuan Wang, and Ji-Rong Wen. Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems, 2024. URL https://arxiv.org/abs/2412.09413.", + "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393.", + "NovaSky Team. Sky-T1: Train your own o1 preview model within $450, 2025. URL https://novasky-ai.github.io/posts/sky-t1.", + "OpenAI. OpenAI o1 system card, 2024. URL https://arxiv.org/abs/2412.16720.", + "OpenThoughts Team. Open Thoughts, January 2025. 
URL https://open-thoughts.ai.", + "Debjit Paul, Mete Ismayilzada, Maxime Peyrard, Beatrix Borges, Antoine Bosselut, Robert West, and Boi Faltings. REFINER: Reasoning feedback on intermediate representations. In Proceedings of European Chapter of the ACL (EACL), pages 1100-1126, 2024.", + "Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. Mutual reasoning makes smaller LLMs stronger problem-solvers, 2024. URL https://arxiv.org/abs/2408.06195.", + "Samyam Rajbhandari, Jeff Rasley, Olatunj Ruwase, and Yuxiong He. Zero: Memory optimization towards training A trillion parameter models. CoRR, abs/1910.02054, 2019. URL http://arxiv.org/abs/1910.02054.", + "David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R Bowman. GPQA: A graduate-level google-proof Q&A benchmark. In Proceedings of Conference on Language Modeling (COLM), 2024.", + "RUCAIBox STILL Team. STILL-3-1.5B-preview: Enhancing slow thinking abilities of small models through reinforcement learning. 2025. URL https://github.com/RUCAIBox/Slow_Thinking_with_LLMs.", + "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, Y. K. Li, Y. Wu, and Daya Guo. DeepSeekMath: Pushing the limits of mathematical reasoning in open language models, 2024. URL https://arxiv.org/abs/2402.03300." + ], + "bbox": [ + 84, + 107, + 916, + 871 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 383, + 68, + 611, + 80 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 895, + 946, + 911, + 958 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. Hybridflow: A flexible and efficient rlhf framework. 
In Proceedings of European Conference on Computer Systems (EuroSys), EuroSys '25, page 1279-1297. ACM, March 2025. doi: 10.1145/3689031.3696075. URL http://dx.doi.org/10.1145/3689031.3696075.", + "Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. Trl: Transformer reinforcement learning, 2020. URL https://github.com/huggingface/trl.", + "Shangshang Wang and Willie Neiswanger. LLM reasoning: Curated insights, 2025. URL https://shangshangwang.notion.site/llm-reasoning.", + "Zhenyu Wu, Qingkai Zeng, Zhihan Zhang, Zhaoxuan Tan, Chao Shen, and Meng Jiang. Large language models can self-correct with key condition verification. In Proceedings of Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 12846-12867, 2024.", + "Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, Chenyang Shao, Yuwei Yan, Qinglong Yang, Yiwen Song, Sijian Ren, Xinyuan Hu, Yu Li, Jie Feng, Chen Gao, and Yong Li. Towards large reasoning models: A survey of reinforced reasoning with large language models, 2025. URL https://arxiv.org/abs/2501.09686.", + "Weihao Zeng, Yuzhen Huang, Qian Liu, Wei Liu, Keqing He, Zejun Ma, and Junxian He. SimpleRL-Zoo: Investigating and taming zero reinforcement learning for open base models in the wild, 2025. URL https://arxiv.org/abs/2503.18892." + ], + "bbox": [ + 83, + 107, + 916, + 489 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 383, + 68, + 611, + 80 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 895, + 946, + 911, + 958 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Appendix", + "text_level": 1, + "bbox": [ + 442, + 104, + 542, + 126 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "A. 
Cost Breakdown", + "text_level": 1, + "bbox": [ + 84, + 148, + 277, + 167 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "This section provides further details on how training data amounts, computational cost, time cost, and performance metrics reported in this paper – particularly those presented in figures like Figures 1 and 3 – were determined and should be interpreted.", + "bbox": [ + 84, + 181, + 916, + 233 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Overall Comparison (Figure 1). For the baseline models included in Figure 1, the approximate training data amounts, computational costs (typically reported as GPU hours or total FLOPs), and training times are sourced from their respective technical reports or publications, leveraging the helpful summary provided in the Open-RS paper (Dang and Ngo, 2025). Reasoning performance scores for all models, encompassing both baselines and our Tina models, stem from results presented in Tables 2 and 3.", + "bbox": [ + 84, + 242, + 913, + 327 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Also, it is crucial to understand the scope of reported costs:", + "bbox": [ + 84, + 335, + 545, + 353 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Epoch vs. Best Checkpoint: Costs cited for Tina and baseline models reflect the resources needed to complete a full training epoch or a predefined training run, not necessarily the minimal cost to reach the single best-performing checkpoint within that run.", + "- Training vs. Evaluation: Reported costs cover training only, omitting the computational expense required for model evaluation across benchmarks since such information is missing from several baseline models." 
+ ], + "bbox": [ + 96, + 362, + 911, + 452 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Particularly, the $9 USD in the abstract represents the estimated cost to train the Tina model up to its best-performing checkpoint and subsequently evaluate that specific checkpoint. For context comparing potential full training runs, the cost to train a Tina model for a complete epoch is$ 14 USD (training only). Including evaluation costs for such a full run would increase the total to approximately $31 USD. We emphasize the $9 as representing the efficient path to the best Tina model.", + "bbox": [ + 84, + 463, + 913, + 547 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "FLOPs Estimation (Figure 3). The approximate training FLOPs shown in Figure 3 serve as a hardware-agnostic measure of computational work. For both Tina and baseline models, these values were estimated based on reported training durations and hardware configurations sourced from technical reports or the Open-RS summary, using standard FLOPs calculation methodologies.", + "bbox": [ + 84, + 556, + 913, + 626 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 611, + 80 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 895, + 946, + 911, + 955 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "B. Background behind Tina Training", + "text_level": 1, + "bbox": [ + 83, + 104, + 449, + 125 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "B.1. 
GRPO Formulation", + "text_level": 1, + "bbox": [ + 83, + 140, + 284, + 157 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Recall the following formulation of GRPO: For each question $q$ , GRPO samples a group $G = \\{o_1, o_2, \\ldots, o_G\\}$ of outputs from the old policy $\\pi_{\\theta_{\\mathrm{old}}}$ and optimizes the policy $\\pi_{\\theta}$ by maximizing the following objective:", + "bbox": [ + 81, + 167, + 913, + 205 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\underset { \\begin{array}{c} q \\sim P (Q), \\\\ \\{o _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta_ {\\mathrm {o l d}}} (O | q) \\end{array} } {\\mathbb {E}} \\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\left(\\min \\left(\\frac {\\pi_ {\\theta} (o _ {i} | q)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} (o _ {i} | q)} A _ {i}, \\operatorname {c l i p p e d} \\left(\\frac {\\pi_ {\\theta} (o _ {i} | q)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} (o _ {i} | q)}, 1 - \\epsilon , 1 + \\epsilon\\right) A _ {i}\\right) - \\beta \\mathbb {D} _ {\\mathrm {K L}} (\\pi_ {\\theta} | | \\pi_ {\\mathrm {r e f}})\\right) \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 109, + 215, + 885, + 276 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Here $A_{i}$ denotes the advantage computed from a group of rewards $\\{r_1,r_2,\\dots ,r_G\\}$", + "bbox": [ + 81, + 287, + 720, + 305 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\nA _ {i} = \\frac {r _ {i} - \\mathrm {m e a n} (\\{r _ {1} , r _ {2} , \\ldots , r _ {G} \\})}{\\mathrm {s t d} (\\{r _ {1} , r _ {2} , \\ldots , r _ {G} \\})},\n$$\n", + "text_format": "latex", + "bbox": [ + 370, + 315, + 625, + 352 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "and", + "bbox": [ + 81, + 363, + 119, + 378 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {D} _ {\\mathrm {K L}} (\\pi_ {\\theta} | | \\pi_ {\\mathrm {r e f}}) = \\frac {\\pi_ {\\mathrm {r e f}} (o _ {i} | 
q)}{\\pi_ {\\theta} (o _ {i} | q)} - \\log \\frac {\\pi_ {\\mathrm {r e f}} (o _ {i} | q)}{\\pi_ {\\theta} (o _ {i} | q)} - 1.\n$$\n", + "text_format": "latex", + "bbox": [ + 313, + 375, + 684, + 412 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Note that $\\epsilon$ and $\\beta$ are parameters controlling the clipping range and KL penalty, respectively.", + "bbox": [ + 81, + 417, + 803, + 435 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "B.2. LoRA Formulation", + "text_level": 1, + "bbox": [ + 83, + 460, + 279, + 476 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We follow the standard LoRA setup (Hu et al., 2021). Given a frozen pretrained weight matrix $W_0 \\in \\mathbb{R}^{d \\times k}$ and trainable low-rank matrices $A \\in \\mathbb{R}^{d \\times r}$ and $B \\in \\mathbb{R}^{r \\times k}$ with $r \\ll \\min(d, k)$ , the original forward pass $h(x) = W_0 x$ is modified as", + "bbox": [ + 81, + 489, + 913, + 546 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {h} (x) = W _ {0} x + A B x.\n$$\n", + "text_format": "latex", + "bbox": [ + 419, + 556, + 575, + 577 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We use the default LoRA implementation provided in the PEFT (Mangrulkar et al., 2022) library.", + "bbox": [ + 81, + 589, + 841, + 608 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 383, + 68, + 611, + 80 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 895, + 946, + 911, + 957 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C. Additional Experimental Details", + "text_level": 1, + "bbox": [ + 83, + 104, + 431, + 125 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "C.1. 
Hyperparameters", + "text_level": 1, + "bbox": [ + 83, + 140, + 274, + 157 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We show our default choice of hyperparameter in Table 5 for all the LoRA-based RL experiments.", + "bbox": [ + 81, + 167, + 836, + 186 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/8360271f6e05414bddb8422616944a29e0ee5c6272108bd1fee1ef9fbeefeb86.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Tina-STILL-3-1.5B-previewLoRA
Tina-DeepScaleR-1.5B-PreviewLoRA
Tina-Open-RS{X}-{Y}LoRA
Tina-LIMR-{Z}LoRA
Tina-OpenR1LoRA
Tina-OpenThoughtsLoRA
LoRA Modulesquery, key, value, dense
LoRA Rank32
LoRA α128
LoRA Dropout0.05
AlgorithmGRPO
OptimizerAdamW
Optimizer Momentumβ1, β2 = 0.9, 0.999
Learning Rate1e-6
LR SchedulerCosine with Min LR
Warmup Ratio0.1
PrecisionBF16-mixed
Gradient Accumulation Step4
Total Train Batch Size32
Epochs1
Hardware2 × NVIDIA L40S
Max Prompt Length512
Max Completion Length3584
Number of Generation4
Vllm GPU Memory Utilization0.4
Vllm Max Model Length4608
", + "bbox": [ + 282, + 196, + 710, + 632 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 5: Common hyperparameter settings.", + "bbox": [ + 81, + 652, + 395, + 669 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We also show the varied hyperparameter in Table 6 for all the LoRA-based RL experiments. Particularly, all the reward types including Accuracy, Format, Length, Cosine, Tag Count, Reasoning Steps, Repetition Penalty, are defined and implemented by the OpenR1 code repository.4", + "bbox": [ + 81, + 686, + 916, + 739 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 611, + 80 + ], + "page_idx": 18 + }, + { + "type": "page_footnote", + "text": "4https://github.com/huggingface/open-r1", + "bbox": [ + 102, + 896, + 379, + 912 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 895, + 946, + 911, + 958 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/decc081a621463503dc87bdda5aa04774f28a8003bed84c4134f7d368e1c0ad1.jpg", + "table_caption": [], + "table_footnote": [ + "Table 6: Varied hyperparameter settings where “-” means unchanged from the common settings in Table 5." + ], + "table_body": "
ModelLoRA RankLoRA AlphaLoRA DropoutAlgorithmLearning RateReward TypeReward Weights
Tina-STILL-3-1.5B-preview-----Accuracy, Length2, 1
Tina-DeepScaleR-1.5B-Preview-----Accuracy, Format2, 1
Tina-Open-RS3-----Cosine, Format2, 1
Tina-Open-RS3-DrGRPO---DrGRPO-Cosine, Format2, 1
Tina-Open-RS2-----Accuracy, Format2, 1
Tina-Open-RS1-----Accuracy, Format2, 1
Tina-LIMR-----Accuracy, Format2, 1
Tina-LIMR-5e-6-lr----5e-6Accuracy, Format2, 1
Tina-LIMR-5e-7-lr----5e-7Accuracy, Format2, 1
Tina-LIMR-64-LoRA-rank64256---Accuracy, Format2, 1
Tina-LIMR-16-LoRA-rank1664---Accuracy, Format2, 1
Tina-LIMR-8-LoRA-rank832---Accuracy, Format2, 1
Tina-LIMR-4-LoRA-rank416---Accuracy, Format2, 1
Accuracy, Cosine, Format, Length, Tag Count, Reasoning Steps, Repetition Penalty1, 1, 1, 1, 1, 1
Tina-OpenR1-----Accuracy, Cosine, Format, Length, Tag Count, Reasoning Steps, Repetition Penalty1, 1, 1, 1, 1, 1
Tina-OpenThoughts-----
", + "bbox": [ + 223, + 90, + 712, + 907 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 387, + 69, + 609, + 80 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 895, + 948, + 911, + 957 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "C.2. Evaluation Command", + "text_level": 1, + "bbox": [ + 86, + 108, + 305, + 123 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The following is the evaluation command we use to combine lighteval and vLLM for performance evaluation on reasoning tasks. The MODEL_PATH should be replaced with either the local path or huggingface identifier to the model to be evaluated. TASK should be one of the six reasoning tasks including aime24, aime25, amc23, math_500, gpqa: diamond, and minerva. PATH_TO_OPEN_R1_EVALUATEScript should be the path to the custom evaluate script provided by OpenR1.", + "bbox": [ + 84, + 135, + 913, + 223 + ], + "page_idx": 20 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "MODEL Arguments=\"pretrained=\\(MODEL_PATH, dtype=float16, data_parallel_size=2, max_model_length=32768, gpu_memory Utilization=0.5, generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}\"", + "guess_lang": "python", + "bbox": [ + 84, + 246, + 952, + 349 + ], + "page_idx": 20 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "lighteval vllm $MODEL.argS \"custom|$TASK|0|0\"\n--custom-tasks $PATH_TO_OPEN_R1_EVALUATE-script\n--use-chat-template", + "guess_lang": "shell", + "bbox": [ + 84, + 366, + 612, + 416 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 383, + 68, + 609, + 80 + ], + "page_idx": 20 + }, + { + "type": "footer", + "text": "5https://github.com/huggingface/open-r1/blob/4f5b21e21dec473af9729bce8e084deb16223ae4/src/open_r1/Evaluate.py", + "bbox": [ + 102, + 
896, + 893, + 912 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 895, + 948, + 910, + 957 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "D. Full Tina Model Performance Evaluation", + "text_level": 1, + "bbox": [ + 86, + 104, + 511, + 123 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In this section, we present all Tina models' detailed evaluation performance during post-training across six reasoning tasks including AIME24/25, AMC23, MATH500, GPQA and Minerva.", + "bbox": [ + 84, + 138, + 913, + 172 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/489481ae846805813de3af37b2eba7e7c9d549e8a430de5eda6c253bfc5d0a50.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
CHECKPOINT STEPS (3740 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
50030.0013.3375.0083.6035.8632.3545.02
100036.6720.0065.0084.8032.3227.9444.46
150026.6720.0070.0083.8037.3726.8444.11
200036.6730.0077.5084.6033.3326.8448.16
250033.3330.0070.0083.0035.3527.5746.54
300030.0020.0067.5082.6030.8125.7442.78
350030.0026.6767.5082.2032.3226.1044.13
", + "bbox": [ + 129, + 185, + 867, + 321 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/1ee0c6d37cc65fe717551ff9bea8909244dc02f1b5139c065c55b0e684f3a275.jpg", + "table_caption": [ + "Table 7: Performance evaluation of Tina-STILL-3-1.5B-preview." + ], + "table_footnote": [], + "table_body": "
CHECKPOINT STEPS (5039 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
50030.0023.3367.5082.4039.3931.2545.65
100043.3326.6767.5086.2037.8828.6848.38
150030.0020.0080.0084.8032.8329.4146.17
200020.0026.6757.5080.6029.2924.2639.72
250013.3316.6752.5075.0031.3118.0134.47
300026.6716.6757.5078.6028.7923.1638.57
350023.3323.3362.5080.4031.8224.2640.94
400020.0020.0070.0082.0041.4127.9443.56
450023.3320.0072.5080.8034.8526.4742.99
500020.0026.6775.0080.8033.3329.4144.20
", + "bbox": [ + 129, + 369, + 867, + 551 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Table 8: Performance evaluation of Tina-DeepScaleR-1.5B-Preview.", + "bbox": [ + 84, + 564, + 609, + 579 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 387, + 69, + 609, + 80 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 895, + 948, + 911, + 955 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/f3bb7eda6f0c5f44a8d6892ff46ddcd98d6c1bdaa0dba782f01b360e1e83989e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
CHECKPOINT STEPS (875 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5026.6723.3375.0084.2037.3729.0445.94
10030.0030.0065.0083.0037.3729.7845.86
15036.6716.6765.0084.8027.7827.9443.14
20020.0026.6770.0083.8033.3327.9443.62
25036.6720.0065.0084.6038.3828.3145.49
30033.3326.6770.0085.2030.8130.1546.03
35040.0016.6777.5084.4039.9027.9447.74
40030.0016.6770.0082.8035.8631.2544.43
45036.6726.6770.0085.6033.8432.7247.58
50036.6723.3382.5085.2037.3731.6249.45
55026.6716.6780.0086.0035.3529.7845.75
60030.0026.6770.0084.6037.8829.7846.49
65020.0023.3380.0085.0033.3327.9444.93
70033.3313.3372.5085.0040.4031.9946.09
75033.3323.3375.0083.6031.3127.5745.69
80030.0023.3365.0084.2038.3829.0444.99
85026.6726.6775.0083.8031.8227.9445.32
", + "bbox": [ + 129, + 138, + 867, + 441 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/4ca7537e6fe64a4ec131c6ed4f8cf75a1a629a084b6eeac69db32310ad2ec862.jpg", + "table_caption": [ + "Table 9: Performance evaluation of Tina-0pen-RS3." + ], + "table_footnote": [], + "table_body": "
CHECKPOINT STEPS (875 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5033.3323.3377.5084.2038.8929.0447.72
10036.6723.3372.5084.2031.3128.6846.12
15040.0023.3372.5085.8030.3030.5147.07
20026.6723.3370.0083.8039.3929.4145.43
25046.6713.3372.5082.6031.8230.5146.24
30030.0026.6775.0084.0033.3329.0446.34
35033.3320.0075.0084.8037.3728.6846.53
40026.6716.6770.0083.2037.3727.5743.58
45043.3326.6777.5087.0036.3632.7250.60
50020.0023.3367.5084.2033.8429.4143.05
55040.0023.3372.5083.6040.9130.8848.54
60033.3320.0072.5084.2032.8330.8845.62
65033.3323.3357.5083.8034.8530.5143.89
70023.3326.6770.0082.4033.3328.6844.07
75030.0023.3372.5084.2038.8929.0446.33
80030.0026.6775.0084.4032.3229.4146.30
85026.6723.3370.0083.8035.8628.6844.72
", + "bbox": [ + 129, + 545, + 867, + 848 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Table 10: Performance evaluation of Tina-0pen-RS2.", + "bbox": [ + 84, + 859, + 477, + 875 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 387, + 69, + 609, + 80 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 895, + 948, + 911, + 955 + ], + "page_idx": 22 + }, + { + "type": "table", + "img_path": "images/b4fc11af2a42cf76a840b1a3932aee83b2dd2f3789d5b131300587c7d4a21f96.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
CHECKPOINT STEPS (2327 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
40033.3320.0075.0083.8031.8229.7845.62
60030.0030.0077.5084.2034.3431.6247.94
80043.3320.0080.0084.0035.3528.6848.56
100033.3320.0082.5084.4035.8629.7847.64
120036.6720.0067.5084.4037.8830.1546.10
140030.0020.0067.5083.4031.8229.7843.75
160023.3313.3365.0083.4035.8626.8441.29
180026.6720.0075.0084.2034.3427.5744.63
200030.0026.6772.5083.0036.3627.9446.08
220030.0023.3370.0081.4030.8126.4743.67
240030.0023.3367.5081.8030.3027.5743.42
", + "bbox": [ + 129, + 114, + 867, + 314 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/5ca376c85eec1315968cb1b29eabeab870f772630b1bec03adc8b5ab0477fc7f.jpg", + "table_caption": [ + "Table 11: Performance evaluation of Tina-0pen-RS1." + ], + "table_footnote": [], + "table_body": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
5020.0026.6767.5085.4037.8830.5144.66
10046.6720.0075.0083.8034.8530.5148.47
15026.6720.0072.5084.0037.3730.1545.12
20033.3330.0062.5083.4029.8030.8844.99
", + "bbox": [ + 129, + 367, + 867, + 452 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/259a17837b341a15b4289b19971555d0291ddd0ca62202c001f2e7d7e7a57c66.jpg", + "table_caption": [ + "Table 12: Performance evaluation of Tina-LIMR." + ], + "table_footnote": [], + "table_body": "
CHECKPOINT STEPS (11716 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
50030.0020.0077.5085.2033.8430.1546.12
100030.0023.3372.5085.6033.8426.6745.32
150036.6726.6775.0086.8039.9030.5149.26
200026.6723.3367.5083.2029.8031.6243.69
250030.0023.3372.5083.8033.8426.8445.05
300020.0030.0067.5084.6034.3428.3144.13
350036.6723.3367.5083.6031.3125.7444.69
", + "bbox": [ + 129, + 505, + 867, + 638 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/d67dbf5f5b71c11036cb87eea3eff99ae62340ff38a964f0b90e4e59d337bcbb.jpg", + "table_caption": [ + "Table 13: Performance evaluation of Tina-0penR1." + ], + "table_footnote": [], + "table_body": "
CHECKPOINT STEPS (8259 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
50033.3016.6777.5084.2035.8630.1546.28
100033.3323.3380.0085.2024.7532.7246.56
150030.0023.3370.0086.0037.8829.0446.04
200030.0023.3370.0084.2033.3328.3144.86
250036.6726.6772.5084.8041.4133.0949.19
300026.6723.3375.0083.6034.3432.7245.94
350020.0016.6760.0084.2032.3226.1039.88
400033.3323.3372.5083.6038.3827.9446.51
450030.0020.0065.0085.0033.8426.8443.45
500020.0033.3365.0084.8040.9130.8845.82
", + "bbox": [ + 129, + 691, + 867, + 875 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Table 14: Performance evaluation of Tina-OpenThoughts.", + "bbox": [ + 84, + 885, + 514, + 900 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 609, + 80 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 895, + 948, + 911, + 957 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/34438dc604081a53d0f3b2eb9a2446c6116792b8fa9876c28dc8d52561641a4a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
CHECKPOINT STEPS (875 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5033.3316.6775.0083.8037.3726.8445.50
10016.6720.0070.0083.2033.3326.4741.61
15043.3323.3380.0085.0035.3530.1549.53
20030.0023.3370.0084.0039.9028.6845.99
25033.3330.0065.0083.8034.3428.3145.80
30036.6716.6767.5084.4037.8829.7845.48
35026.6730.0075.0084.0037.8829.7847.22
40036.6723.3372.5084.4032.8327.5746.22
45036.6716.6772.5085.6029.2927.5744.72
50030.0020.0072.5085.6037.3729.4145.81
55030.0023.3377.5084.8036.8731.6247.35
60033.3326.6772.5083.8030.3028.3145.82
65026.6720.0077.5082.4037.8827.9445.40
70036.6720.0080.0083.8035.3531.2547.85
75030.0026.6775.0084.2038.8927.5747.06
80020.0030.0075.0082.4035.8628.3145.26
85023.3320.0072.5085.4036.3630.1544.62
", + "bbox": [ + 127, + 118, + 869, + 421 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/2f6aefc175134f265b1b159383b902b03d40a9cce69d5e4661266307f817b9d4.jpg", + "table_caption": [ + "Table 15: Performance evaluation of Tina-0pen-RS3-DrGRPO." + ], + "table_footnote": [], + "table_body": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5020.0026.6767.5085.4037.8830.5144.66
10046.6720.0075.0083.8034.8530.5148.47
15026.6720.0072.5084.0037.3730.1545.12
20033.3330.0062.5083.4029.8030.8844.99
", + "bbox": [ + 129, + 484, + 867, + 571 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/3bceae34b8cb8c4220f13aedaf6a0d44948a79c1bbfe8ae7d3cc92a35320ed29.jpg", + "table_caption": [ + "Table 16: Performance evaluation of Tina-LIMR-5e-6-1r with learning rate 5e-6." + ], + "table_footnote": [], + "table_body": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5040.0013.3372.5083.0034.3429.0445.37
10043.3316.6777.5084.6034.8530.5147.91
15030.0023.3372.5086.2037.3730.5146.65
20033.3313.3370.0083.2029.2931.2543.40
", + "bbox": [ + 129, + 633, + 867, + 720 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/3f7a8c1ad3cb2877b0af8755abcdcfe54a8790edb795d23763fd06181cd40a01.jpg", + "table_caption": [ + "Table 17: Performance evaluation of Tina-LIMR-5e-7-1r with learning rate 5e-7." + ], + "table_footnote": [], + "table_body": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5020.0030.0077.5084.2038.3831.6246.95
10030.0023.3372.5084.6032.3229.7845.42
15036.6720.0070.0083.4031.8230.8845.46
20033.3320.0072.5085.0029.8029.4145.01
", + "bbox": [ + 129, + 782, + 867, + 869 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Table 18: Performance evaluation of Tina-LIMR-64-LoRA-rank with LoRA rank 64 and alpha 512.", + "bbox": [ + 83, + 878, + 812, + 895 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 611, + 80 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 893, + 946, + 911, + 958 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/e30c2021c32c397470ab042d61912768d1d764516c4830521a738697490ba23f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5033.3323.3362.5084.2038.8931.2545.58
10043.3333.3370.0083.2035.3528.3148.92
15026.6716.6772.5083.4035.3529.0443.94
20036.6720.0075.0083.0039.3930.5147.43
", + "bbox": [ + 129, + 104, + 867, + 191 + ], + "page_idx": 25 + }, + { + "type": "table", + "img_path": "images/2863c059feaad5bb8d161330289c274b045111a2d87ecc5977c5da81987d3f33.jpg", + "table_caption": [ + "Table 19: Performance evaluation of Tina-LIMR-16-LoRA-rank with LoRA rank 16 and alpha 64." + ], + "table_footnote": [], + "table_body": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
5030.0026.6782.5083.8033.8430.5147.89
10026.6716.6772.5084.0036.8729.7844.42
15053.3320.0060.0083.2037.3730.8847.46
20023.3320.0072.5085.4032.8328.6843.86
", + "bbox": [ + 129, + 239, + 867, + 325 + ], + "page_idx": 25 + }, + { + "type": "table", + "img_path": "images/87ea49257f6941cd6e4541f776c070114d4a35cf437f8e9258c5111a25444c07.jpg", + "table_caption": [ + "Table 20: Performance evaluation of Tina-LIMR-8-LoRA-rank with LoRA rank 8 and alpha 32." + ], + "table_footnote": [], + "table_body": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
5030.0023.3365.0085.0035.3529.7844.74
10026.6726.6772.5082.8034.8529.0445.42
15036.6720.0085.0083.8031.8229.047.72
20033.3323.3377.5085.4035.8628.3147.29
", + "bbox": [ + 129, + 373, + 867, + 459 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Table 21: Performance evaluation of Tina-LIMR-4-LoRA-rank with LoRA rank 4 and alpha 16.", + "bbox": [ + 84, + 469, + 781, + 484 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 609, + 80 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 895, + 946, + 911, + 958 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "E. Full Tina Model Training Phase Transition", + "text_level": 1, + "bbox": [ + 83, + 104, + 532, + 126 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "In this section, we present all Tina models' training phase transitions along the training dynamics. Specifically, we observe clear phase transitions in the training of Tina-DeepScaleR-1.5B-Preview, Tina-STILL-3-1.5B-preview, Tina-Open-RS1, Tina-Open-RS2, Tina-Open-RS3, and Tina-Open-RS3-GRPO, as shown in Figures 5, 6, and 7. For Tina-OpenR1 and Tina-Thoughts (Figures 8 and 9), the observation is similar, except the best-performing checkpoint is achieved after the training turning point, rather than before. 
However, we do not observe such a transition in all Tina variants on the LIMR dataset, as shown in Figures 10, 11, and 12, possibly because its small data size leads to training periods which are too brief to extract meaningful information.", + "bbox": [ + 80, + 138, + 916, + 277 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/d353ef6fc3f55dfc1422f1740441c1355ae02f826e634eac7f3afe7a8106e2b5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 124, + 290, + 495, + 454 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/e227c2e22316cb4b9419f97f054e7fabe4740818619f91e7c8b4fe40c3152bac.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 290, + 870, + 454 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/0e2732e82b9d5d73f809168cfbc974b98f9f0044648299abe8a4b63ae8b60533.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 124, + 458, + 493, + 625 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/94c0a15fc368a7728149a7406671dbf227cdd83e77da83147f99fd9a06986856.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 458, + 870, + 625 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/14dd99703ddfbd21f42c03322ea1708b90ea5a70843aba660bc25ee705707508.jpg", + "image_caption": [ + "Figure 5: Phase transition in Tina-DeepScaleR-1.5B-Preview and Tina-STILL-3-1.5B-Preview. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." 
+ ], + "image_footnote": [], + "bbox": [ + 125, + 627, + 493, + 792 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/0b36de5f11dfc263cdad476fb93fe133556cacd1abefa32b7b6253470e317fec.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 627, + 870, + 792 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 611, + 80 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 893, + 946, + 911, + 958 + ], + "page_idx": 26 + }, + { + "type": "image", + "img_path": "images/bc4af8c268796a19b9e7ca41828353aa9b004373143e6b9529eeeb8ed4804fab.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 124, + 234, + 491, + 398 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/b191a5bf97faa7c45c1fee1dc686ecc9bfe74d6f70924690bd7ccbbb418b18d1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 234, + 870, + 398 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/473a1311c96974b772b1c964513df3b56b622e527385e32bcac20487ab0f608b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 125, + 402, + 491, + 566 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/125d835e1868d60ed3c9237be1434952fbe0d18bf4c96c75bd2bcb9459f8c2c9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 402, + 870, + 566 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/cb82a48a302ec32484b525d394a3919acf1c3e6f8775d6f31572654513eb4bf8.jpg", + "image_caption": [ + "Figure 6: Phase transition in Tina-0pen-RS1 and Tina-0pen-RS2. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." 
+ ], + "image_footnote": [], + "bbox": [ + 125, + 571, + 491, + 736 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/00890f19ec10de6d30defeffa76f84c3755edb43396146fd6ecd1a3608db87a5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 571, + 870, + 736 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 611, + 80 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 893, + 946, + 911, + 958 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/552fe327e350e53b6edd0dae81e7b1d91bfa404f3cf9e8181fcb5171edcfe6e2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 124, + 234, + 493, + 398 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/6860139cead7f0972829f859c4daa9a36307284fcbfceeda15ecec531e559f3c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 234, + 870, + 398 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/496fcd61ed8d480b0945f217eb5a6f72daca5483eeed7daf40f65de2b5759fde.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 125, + 402, + 493, + 566 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/da84d29ddbc030bb2732b675abae652384353c1ddbd7c6ca78bb6eac68830c8a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 402, + 870, + 566 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/2ada93971be1b948ee6a2f2f11c7befda25eb181ff27c2b09831de984ff64ceb.jpg", + "image_caption": [ + "Figure 7: Phase transition in Tina-Open-RS3 and Tina-Open-RS3-GRPO. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." 
+ ], + "image_footnote": [], + "bbox": [ + 125, + 571, + 493, + 736 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/6cab3827b7f7c4ceceade4215a0f8483d0bde3adef8952c5ce5f9ed175c0d13d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 571, + 870, + 736 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 611, + 80 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 893, + 946, + 911, + 958 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/9cea77735d3cf6c3ebdecad0073a818a5f4f1cca13aa643d18fce481c4540429.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 127, + 150, + 493, + 314 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/a288133d71b7c9dc147e7ee5f78170f5e2d79b32a4c40cea023285708958f07c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 150, + 870, + 314 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/4ccbe6938b7d744c676eb276e92dafd3e742f1a5f1bd949cac46ae8d3ac3be6e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 127, + 319, + 493, + 483 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/bf53e4cf04bfe1cc8a3830d64c251087ca403806e1f7125338cfdeb5cdd2d04b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 319, + 870, + 483 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/38be411b859adab5f6a4a1d0e537338c225d76824738d7c50323bcdde5094d26.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 127, + 488, + 493, + 651 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/35c5b458c2182b9b490a45b3816e4fc2f50b5128398cf5a2ca7d6ddf42470a1b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 488, + 870, + 651 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": 
"images/1f8ce392dcce7770903d3307c0a3c06e266cc11aacf0341e740d2ab6a0d0da40.jpg", + "image_caption": [ + "Figure 8: Phase transition in Tina-0penR1. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." + ], + "image_footnote": [], + "bbox": [ + 127, + 656, + 493, + 820 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/d77248ceec0597e3173ad7abb2455e9c6b9bd88d21d80b0ff7244825d3df6bc0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 656, + 870, + 820 + ], + "page_idx": 29 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 611, + 80 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 893, + 946, + 911, + 958 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/48735e2ffbf120d3300187ac8dc036e9eb73a17de816b992af60c8f0bba9dee0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 127, + 150, + 493, + 314 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/bc9a0f1c786c904a3f69ed882f4cf22f1a6a30c05c7bf904d4badae2ee5de727.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 150, + 870, + 314 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/7687d000e20a32466588a78df3334c316590d1f20e9a7d47bf25e95f2358b2fc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 125, + 319, + 493, + 483 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/e12f87e58a46f826adb346894e3dc14c83b821d5f0533d83abf938406e0ec4c7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 319, + 870, + 483 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/30264a3080c01d7d363c382e51e0d8e0b3db806e01af37dd0be555d1b1175270.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 125, + 488, + 493, + 652 + ], + "page_idx": 30 + }, + { + 
"type": "image", + "img_path": "images/93c8ae4fc2db00f11f3c86a37f454a2dd65aef08a5cee4ddac7578f774212764.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 488, + 870, + 652 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/d0a6b65304f02a110defa4ef71b15594342b223bbbf05dc73dd44d06fd2fb35c.jpg", + "image_caption": [ + "Figure 9: Phase transition in Tina-OpenThoughts. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." + ], + "image_footnote": [], + "bbox": [ + 125, + 656, + 493, + 820 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/1f769eabe9d3b95792ee514dca322f1a413030ec9276fa2fa355fb3eeeebcc94.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 656, + 870, + 820 + ], + "page_idx": 30 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 611, + 80 + ], + "page_idx": 30 + }, + { + "type": "page_number", + "text": "31", + "bbox": [ + 893, + 946, + 910, + 958 + ], + "page_idx": 30 + }, + { + "type": "image", + "img_path": "images/6863227a9bb5aee1c96f10a6063b1ed571d1bf23545e337ee8f67927af9938c9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 98, + 299, + 359, + 416 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/8e7d37997e4454107c9b3f1d5d83cd73d2c4e31cb6d40d54edf747a32eeab3c1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 366, + 299, + 629, + 416 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/e10e05106fdbb6069713e797be132f798dd8dd77c486e0e23785c982fa11ca63.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 635, + 299, + 898, + 417 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/f82684fd818799f06d8422102e17ce5a54eb80cd17d33223ef24703ede35a673.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 98, + 419, + 361, + 537 + 
], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/94b7e9fa1ffeecc96b0ef41ea51000b26d02f97e7c1e3593294689e973f839ad.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 366, + 419, + 629, + 537 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/e26a69ce1c5d5ce240e9dc3cff8e42ad98280111c18653b9dbec50810ca60eca.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 635, + 419, + 898, + 537 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/5a9ddd7c6ab7cbde615412641ceea6c25f0f3366649e5f32dd25392dc06b6c70.jpg", + "image_caption": [ + "Figure 10: Phase transition in Tina-LIMR, Tina-LIMR-64-LoRA-rank and Tina-LIMR-16-LoRA-rank. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." + ], + "image_footnote": [], + "bbox": [ + 96, + 539, + 361, + 657 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/ed65f9fd40831f8b3283fbd88857fbde1efe7a1a747af3aad5020ea0ab07ecfb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 366, + 539, + 629, + 657 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/04e83e21598d670a72734bc1836dedfcb3fa2769f649de2a33509249caabd40a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 635, + 539, + 898, + 657 + ], + "page_idx": 31 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 611, + 80 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 893, + 946, + 911, + 958 + ], + "page_idx": 31 + }, + { + "type": "image", + "img_path": "images/919f8c76c34c77a7b4d22d069a3190f1356853252c2e8e191e1d7805afc3a619.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 127, + 234, + 493, + 398 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/680477ed58ba6ca295f1e70b13709cb218a8a1b3b052246ddbe20f98c2db2562.jpg", + 
"image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 234, + 870, + 398 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/15737af1ce0d1a20f369f184d8ff8cb4509c833fe136e34db3dc3a6b909812f2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 125, + 402, + 493, + 566 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/7c0cee9452adf0935fd7975a14037e4d33ede03232890ec890460647a16fa1c2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 402, + 870, + 566 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/e293c0018a412234e72b8b60b87a9e3dca0699425d5b9a868e7d6995c7a461f3.jpg", + "image_caption": [ + "Figure 11: Phase transition in Tina-LIMR-8-LoRA-rank and Tina-LIMR-4-LoRA-rank. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." + ], + "image_footnote": [], + "bbox": [ + 125, + 571, + 493, + 736 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/bbe93058d5e119281dbe2a2a7453e28bb4b576cf301184abcc57d8ced53d3f62.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 571, + 870, + 736 + ], + "page_idx": 32 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 611, + 80 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 893, + 946, + 911, + 958 + ], + "page_idx": 32 + }, + { + "type": "image", + "img_path": "images/f4186b7a62fdd3d663ffed363a38e286da6614aad9564a13f40b27a02c1f824c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 124, + 234, + 495, + 398 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/40017567513cbf6d1900af9033c2f09bd1c6aef4b039218e1da821aa0c9116ef.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 234, + 870, + 398 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": 
"images/74cbc8332e1e8c7a7c24513fe7fc932848cb5796a13820024616f51ed8f084a3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 125, + 402, + 493, + 566 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/47eb8632f9c85c72c92536872469d78675758b2e5cda7141a97c301b03c3f345.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 402, + 870, + 566 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/92d66da5abf488c5b52ee99dc185b1dc09d8af4a68e2fbe8d2d7f207a0009495.jpg", + "image_caption": [ + "Figure 12: Phase transition in Tina-LIMR-5e-6-1r and Tina-LIMR-5e-7-1r. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." + ], + "image_footnote": [], + "bbox": [ + 125, + 571, + 493, + 736 + ], + "page_idx": 33 + }, + { + "type": "image", + "img_path": "images/9963c4c73368a08be9b4ba416a12dbe2b4ef4ec1b7263f7133125609784b4ce9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 571, + 870, + 736 + ], + "page_idx": 33 + }, + { + "type": "header", + "text": "Tina: Tiny Reasoning Models via LoRA", + "bbox": [ + 385, + 68, + 611, + 80 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 893, + 946, + 911, + 958 + ], + "page_idx": 33 + } +] \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15777/7d52c4ec-83bf-4780-930e-43bf666b3c1c_model.json b/data/2025/2504_15xxx/2504.15777/7d52c4ec-83bf-4780-930e-43bf666b3c1c_model.json new file mode 100644 index 0000000000000000000000000000000000000000..bb7f46bb119bcd86d75e08d663e56a40cd154152 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/7d52c4ec-83bf-4780-930e-43bf666b3c1c_model.json @@ -0,0 +1,4008 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.265, + 0.061, + 0.709 + ], + "angle": 270, + "content": "arXiv:2504.15777v1 [cs.CL] 22 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.188, + 0.107, + 0.775, + 0.137 + 
], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.149, + 0.763, + 0.186 + ], + "angle": 0, + "content": "Shangshang Wang1, Julian Asilis1, Ömer Faruk Akgül1, Enes Burak Bilgin1, Ollie Liu1, and Willie Neiswanger1" + }, + { + "type": "text", + "bbox": [ + 0.356, + 0.195, + 0.602, + 0.214 + ], + "angle": 0, + "content": "1University of Southern California" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.23, + 0.886, + 0.479 + ], + "angle": 0, + "content": "How cost-effectively can strong reasoning abilities be achieved in language models? Driven by this fundamental question, we present Tina, a family of tiny reasoning models achieved with high cost-efficiency. Notably, Tina demonstrates that substantial reasoning performance can be developed using only minimal resources, by applying parameter-efficient updates during reinforcement learning (RL), using low-rank adaptation (LoRA), to an already tiny 1.5B parameter base model. This minimalist approach produces models that achieve reasoning performance which is competitive with, and sometimes surpasses, SOTA RL reasoning models built upon the same base model. Crucially, this is achieved at a tiny fraction of the computational post-training cost employed by existing SOTA models. In fact, the best Tina model achieves a \\(>20\\%\\) reasoning performance increase and \\(43.33\\%\\) Pass@1 accuracy on AIME24, at only $9 USD post-training and evaluation cost (i.e., an estimated 260x cost reduction). Our work reveals the surprising effectiveness of efficient RL reasoning via LoRA. We validate this across multiple open-source reasoning datasets and various ablation settings starting with a single, fixed set of hyperparameters. Furthermore, we hypothesize that this effectiveness and efficiency stem from LoRA rapidly adapting the model to the structural format of reasoning rewarded by RL, while largely preserving the base model's underlying knowledge. 
In service of accessibility and open research, we fully open-source all code, training logs, and model weights & checkpoints." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.495, + 0.534, + 0.513 + ], + "angle": 0, + "content": "Notion Blog: https://shangshangwang.notion.site/tina" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.513, + 0.585, + 0.53 + ], + "angle": 0, + "content": "Code Repository: https://github.com/shangshang-wang/Tina" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.53, + 0.583, + 0.546 + ], + "angle": 0, + "content": "Training Logs: https://wandb.ai/upup-ashton-wang-usc/Tina" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.546, + 0.596, + 0.563 + ], + "angle": 0, + "content": "Model Weights & Checkpoints: https://huggingface.co/Tina-Yi" + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.495, + 0.596, + 0.563 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.607, + 0.241, + 0.625 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.64, + 0.918, + 0.882 + ], + "angle": 0, + "content": "Language models (LMs) demonstrate increasing proficiency across a variety of tasks, but achieving robust, multi-step reasoning remains a frontier challenge (Wang and Neiswanger, 2025, Xu et al., 2025). Notably, such reasoning abilities are crucial for applications demanding complex problem-solving, from scientific discovery to intricate planning. Enhancing complex reasoning via supervised fine-tuning (SFT) is a well-adopted technique, often utilizing a distillation process (Min et al., 2024, Huang et al., 2024) by which the model learns to mimic reasoning traces (e.g., step-by-step thinking) generated by more advanced models such as o1 (OpenAI, 2024). This approach, while effective, relies upon the quality and availability of such expert demonstrations, which can be costly to obtain. 
Furthermore, it can run the risk of instilling a shallow form of imitation in the learning model, rather than fostering dynamic exploration of reasoning paths. In contrast, reinforcement learning (RL) enables models to learn directly and flexibly from verifiable reward signals derived from curated data (DeepSeek-AI, 2025, Lambert et al., 2025). In doing so, RL can lead the model to explore a greater variety of logical paths and possibly discover more robust solutions. However, RL pipelines are often complex and notoriously resource-intensive, typically involving substantial compute. This raises a fundamental question anchoring our research:" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.897, + 0.828, + 0.915 + ], + "angle": 0, + "content": "How cost-effectively can one perform RL to efficiently instill reasoning abilities in LMs?" + }, + { + "type": "footer", + "bbox": [ + 0.084, + 0.945, + 0.694, + 0.959 + ], + "angle": 0, + "content": "Corresponding author(s): Shangshang Wang shangshangwang.github.io; Willie Neiswanger neiswang@usc.edu" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.612, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "image", + "bbox": [ + 0.094, + 0.109, + 0.296, + 0.252 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.299, + 0.109, + 0.5, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.109, + 0.703, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.706, + 0.108, + 0.904, + 0.253 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.258, + 0.916, + 0.307 + ], + "angle": 0, + "content": "Figure 1: Overall comparison between Tina and baseline models. The Tina model in the figure corresponds to the best checkpoint in Table 10. 
Reasoning performance denotes the average score across AIME24/25, AMC23, MATH500, GPQA, and Minerva, as described in Section 3. The calculation of each comparative metric is detailed in Appendix A." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.321, + 0.917, + 0.596 + ], + "angle": 0, + "content": "Our pursuit of this question necessitates a deliberate move towards minimalism. Rather than utilizing models with tens of billions of parameters (such as Qwen-7B/32B, QwQ-32B-preview, and their variants (Min et al., 2024, NovaSky Team, 2025, Zeng et al., 2025, Muennighoff et al., 2025, Cui et al., 2025, Lyu et al., 2025, OpenThoughts Team, 2025, Hu et al., 2025)), we instead direct our attention to tiny models. In particular, we use the 1.5B parameter model, DeepSeek-R1-Distill-Qwen-1.5B (DeepSeek-AI, 2025). Our choice of this base model aligns with common practices in recent research (RUCAIBox STILL Team, 2025, Luo et al., 2025, Dang and Ngo, 2025): we begin with a foundation that, owing to its specific lineage (DeepSeek/Qwen) and distillation process, likely possesses stronger initial reasoning aptitude compared to a generic pre-trained model of equivalent size. This strategic starting point allows us to more-rigorously evaluate the incremental reasoning enhancements imparted by RL, thereby isolating and measuring the effectiveness of the technique itself over a competent baseline. More importantly, selecting such an architecture dramatically lowers the computational and financial threshold for experimentation. Complementing the choice of a compact base model, we further amplify efficiency during the RL phase and integrate parameter-efficient post-training by employing low-rank adaptation (LoRA) (Hu et al., 2021). Notably, LoRA enables the modification of a model's behavior by training only an exceptionally small number of new parameters. This dovetails with our central motivation: achieving reasoning capabilities through the most economical means possible." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.604, + 0.916, + 0.656 + ], + "angle": 0, + "content": "Integrating the previous two components—a “tiny” model architecture and a “tiny” post-training via LoRA-based RL—we release the Tina (Tiny Reasoning Models via LoRA) family of models, which attain substantial reasoning performance at strikingly low cost. In total, we summarize our contributions as follows:" + }, + { + "type": "text", + "bbox": [ + 0.098, + 0.664, + 0.913, + 0.735 + ], + "angle": 0, + "content": "- Surprising Effectiveness of Efficient RL Reasoning. We show that our Tina models achieve performance competitive with, and in some cases even superior to, SOTA baseline models built on the same base model with full-parameter training, as shown in Figure 1 and in more detail in Table 3. In particular, the best Tina model achieves a \\(>20\\%\\) performance increase and \\(43.33\\%\\) Pass@1 accuracy on AIME24." + }, + { + "type": "text", + "bbox": [ + 0.098, + 0.74, + 0.915, + 0.895 + ], + "angle": 0, + "content": "- Rapid Reasoning Format Adaptation Hypothesis. Based on our observations in post-training Tina, we hypothesize that LoRA's effectiveness and efficiency stem from rapidly adapting the reasoning format under RL while preserving base model knowledge—a likely more compute-efficient process than the deep knowledge integration of full-parameter training. Partial support comes from studies showing tiny LMs can reason effectively (Hugging Face, 2025, DeepSeek-AI, 2025), while large LMs can store broader world knowledge (Allen-Zhu and Li, 2025). This distinction suggests reasoning capabilities can be significantly enhanced by focusing on adapting the output format itself, consistent with our hypothesis about LoRA. To test this, we exclusively train LoRA parameters in RL settings, focusing on leveraging this format adaptation mechanism." 
+ }, + { + "type": "list", + "bbox": [ + 0.098, + 0.664, + 0.915, + 0.895 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.903, + 0.948, + 0.913, + 0.959 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.612, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "image", + "bbox": [ + 0.104, + 0.105, + 0.895, + 0.345 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.36, + 0.916, + 0.394 + ], + "angle": 0, + "content": "Figure 2: Release timeline of open-source models that aim to replicate the performance of advanced reasoning models like o1(-preview) (OpenAI, 2024) and R1 (DeepSeek-AI, 2025), which we refer to as open-source reasoning replicas." + }, + { + "type": "text", + "bbox": [ + 0.098, + 0.418, + 0.917, + 0.522 + ], + "angle": 0, + "content": "- Democratizing RL Reasoning. We provide a reproducible and highly cost-effective approach, enabling wider participation in the exploration of RL techniques without requiring extensive computational resources. Notably, the cost of reproducing the best Tina checkpoint stands at only \\(9, and of reproducing all our experiments and everything presented in this paper from scratch at \\)526. Furthermore, in line with our goal of promoting accessible research, we release all code, training logs, evaluation scripts, and all Tina checkpoints." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.545, + 0.251, + 0.563 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.58, + 0.4, + 0.598 + ], + "angle": 0, + "content": "2.1. 
Open-Source Reasoning Replicas" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.607, + 0.917, + 0.883 + ], + "angle": 0, + "content": "As shown in Figure 2, following the release of o1-preview (OpenAI, 2024), a number of open-source models have emerged to replicate or exceed its reasoning capabilities. STILL (Min et al., 2024) introduced a minimal yet high-quality training recipe designed to elicit reasoning with modest compute, demonstrating that imitation learning from curated traces remains competitive. Sky-T1 (NovaSky Team, 2025) further explored scaling using open instruction-tuned checkpoints, while SimpleRL (Zeng et al., 2025) highlighted the potential of lightweight RL without requiring large-scale reward models. PRIME (Cui et al., 2025) and DeepScaleR (Luo et al., 2025) introduced process supervision and scaling experiments to isolate how reasoning quality evolves with model size and context length. s1 (Muennighoff et al., 2025) showed that even strong base models such as Qwen2.5-32B-Instruct benefit from fine-tuning on only 1k high-quality and long chain-of-thought data, which is curated to elicit reasoning capabilities. L1 (Aggarwal and Welleck, 2025) combined prompt engineering with data curation for RL, resulting in models that can efficiently and adaptively control their response length. Meanwhile, OREAL (Lyu et al., 2025) and OpenThinker (OpenThoughts Team, 2025) investigated self-correction and latent structure emergence through unsupervised and hybrid paradigms. The release of Open Reasoner Zero (Hu et al., 2025) and Open-RS (Dang and Ngo, 2025) further emphasized efficient RL-based strategies for reasoning with small models, completing a landscape of public alternatives for interpretability and reproducibility." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.903, + 0.948, + 0.913, + 0.959 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.612, + 0.082 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.108, + 0.352, + 0.125 + ], + "angle": 0, + "content": "2.2. RL with Verifiable Rewards" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.136, + 0.918, + 0.361 + ], + "angle": 0, + "content": "Reasoning tasks are well-suited to RL paradigms, as the correctness or quality of the final output often provides verifiable reward signals (e.g., the validity of a logical deduction). Such signal can effectively guide the model towards learning more robust reasoning strategies. Consequently, various RL approaches have been explored within this domain. Certain methods introduce auxiliary reward models or critics to assess reasoning quality, such as ReFT (Luong et al., 2024) and REFINER (Paul et al., 2024). Other techniques employ explicit rule-based verification for self-correction (Wu et al., 2024). Some leverage self-play dynamics and exploration, such as mutual reasoning (Qi et al., 2024), or utilize inference-aware fine-tuning that optimizes performance under different sampling strategies (Chow et al., 2024). Notably, Group Relative Policy Optimization (GRPO) has been proposed as a variant of Proximal Policy Optimization (PPO) which removes the need for a separate value network by using a group-based baseline for advantage estimation, improving training efficiency and leading to better reward alignment (Shao et al., 2024), as demonstrated by DeepSeek-R1 (DeepSeek-AI, 2025). Subsequently, Dr.GRPO (Liu et al., 2025) introduced a subtle modification of GRPO addressing its bias to produce long responses. For completeness, we provide the standard formulation of GRPO in Appendix B." 
+ }, + { + "type": "title", + "bbox": [ + 0.084, + 0.384, + 0.308, + 0.401 + ], + "angle": 0, + "content": "2.3. Low-Rank Adaptation" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.412, + 0.919, + 0.568 + ], + "angle": 0, + "content": "While most existing open models that enable reasoning rely on the more expensive full-parameter training (Min et al., 2024, NovaSky Team, 2025, Zeng et al., 2025, Muennighoff et al., 2025, Aggarwal and Welleck, 2025, Cui et al., 2025, Luo et al., 2025, Lyu et al., 2025, OpenThoughts Team, 2025, Hu et al., 2025, Dang and Ngo, 2025), we investigate the use of LoRA for parameter-efficient post-training of reasoning models (Hu et al., 2021). Our goal is to assess whether updating only a small fraction of parameters can still yield strong reasoning capabilities (Han et al., 2024). In addition to its computational efficiency, LoRA provides modularity: by training only a low-rank decomposition of the parameter updates, it becomes possible to toggle reasoning behavior without maintaining multiple full model copies. For completeness, we provide the standard formulation of LoRA in Appendix B." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.593, + 0.497, + 0.612 + ], + "angle": 0, + "content": "3. Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.626, + 0.918, + 0.748 + ], + "angle": 0, + "content": "Tina is our family of models created by post-training the DeepSeek-R1-Distill-Qwen-1.5B base model using LoRA during RL (employing a GRPO-style algorithm). The \"Tiny\" designation encapsulates a deliberate focus on minimalism and efficiency across the entire framework. This encompasses not only the tiny base model architecture and the tiny parameter updates enabled by LoRA, but also extends to a tiny overall resource footprint. 
This minimized footprint is achieved through an efficient training pipeline leveraging accessible open-source datasets and codebase (detailed in Section 3.1), and requires only minimal hardware and budget resources (described in Section 3.2)." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.771, + 0.455, + 0.789 + ], + "angle": 0, + "content": "3.1. Training Pipeline: Baselines & Datasets" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.8, + 0.918, + 0.852 + ], + "angle": 0, + "content": "To facilitate meaningful comparisons and enable precise ablations, we post-train our Tina models via RL using the datasets and setups from publicly available reasoning models. All Tina and baseline models adopt DeepSeek-R1-Distill-Qwen-1.5B as their base model checkpoint with default open-source weights." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.863, + 0.918, + 0.916 + ], + "angle": 0, + "content": "- STILL-3-1.5B-preview (RUCAIBox STILL Team, 2025) is a slow-thinking reasoning model developed through iterative RL on a curated dataset of \\(33\\mathrm{k}\\) reasoning traces. The data originates from mathematics competitions and includes problems from MATH (Hendrycks et al., 2021, Lightman et al.," + }, + { + "type": "page_number", + "bbox": [ + 0.902, + 0.949, + 0.913, + 0.959 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.385, + 0.069, + 0.612, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.108, + 0.916, + 0.143 + ], + "angle": 0, + "content": "2023), NuminaMathCoT (LI et al., 2024), and AIME (1983-2023) (Art of Problem Solving, 2024). Tina-STILL-3-1.5B-preview uses the same dataset and reward pipeline." 
+ }, + { + "type": "text", + "bbox": [ + 0.098, + 0.149, + 0.916, + 0.235 + ], + "angle": 0, + "content": "- DeepScaleR-1.5B-Preview (Luo et al., 2025) focuses on long-context mathematical reasoning via RL, and is trained over approximately 40k problem-answer pairs drawn from the AIME (Art of Problem Solving, 2024), AMC (Art of Problem Solving, 2023), OMNI-MATH (Gao et al., 2024a), and STILL (RUCAIBox STILL Team, 2025) datasets. Tina-DeepScaleR-1.5B-Preview uses this dataset and mirrors the reward design." + }, + { + "type": "text", + "bbox": [ + 0.098, + 0.242, + 0.917, + 0.329 + ], + "angle": 0, + "content": "- Open-RS1/2/3 (Dang and Ngo, 2025) are three models from the Open-RS project exploring reasoning performance in 1.5B models trained via RL. All Open-RS models are trained on small, high-quality datasets further curated from the s1 (Muennighoff et al., 2025) (i.e., Open-S1) and DeepScaleR (Luo et al., 2025) (i.e., Open-DeepScaleR) datasets. The Tina models (Tina-Open-RS1/2/3) replicate these setups, using identical data splits and reward scaffolding." + }, + { + "type": "list", + "bbox": [ + 0.098, + 0.149, + 0.917, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.083, + 0.353, + 0.461, + 0.371 + ], + "angle": 0, + "content": "3.2. Training Setup: Infrastructure & Budget" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.383, + 0.918, + 0.489 + ], + "angle": 0, + "content": "Training Codebase. Our implementation builds upon OpenR1, a fully open reproduction of DeepSeek-R1 (DeepSeek-AI, 2025) which combines the Accelerate (Gugger et al., 2022) and Trl (von Werra et al., 2020) libraries and the DeepSpeed ZeRO optimization (Rajbhandari et al., 2019). It aims to transparently replicate and extend RL methods used for improving reasoning in language models, particularly focusing on aligning model behavior with reasoning-oriented objectives via verifiable reward signals. 
Our methodology inherits its scaffolding, training utilities, and reward interfaces." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.495, + 0.929, + 0.634 + ], + "angle": 0, + "content": "Training Hyperparameters. We initiated parameter selection by replicating key parameters from OpenR1 (Hugging Face, 2025) and OpenRS (Dang and Ngo, 2025). For all experiments presented in this paper, we deliberately adopted the default or recommended hyperparameter configurations provided in their works. These settings were kept largely fixed across different runs (Table 5). For the main Tina results (Section 4.2), only reward function parameters were adjusted per task, and for ablation studies (Section 4.3), only the specific factor under investigation (e.g., learning rate, LoRA rank/alpha, RL algorithm) was varied (Table 6). This approach intentionally circumvents costly hyperparameter search procedures for our specific setup, ensuring negligible tuning overhead and focusing on the efficacy of the core LoRA-based RL methodology." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.641, + 0.918, + 0.779 + ], + "angle": 0, + "content": "Training Hardware. A key element of our low-cost approach was minimizing the hardware footprint. While distributed RL training algorithms like GRPO often benefit from using three or more GPUs (e.g., dedicating one GPU to an inference engine such as vLLM for faster sample generation), we deliberately targeted a minimal setup using only two NVIDIA L40S GPUs. To enable this, we co-located the RL training process and the vLLM on the same two GPUs by constraining vLLM's GPU memory usage. The training itself utilized data parallelism across both GPUs. While running inference and training concurrently on two GPUs might result in a longer wall-clock training time compared to a setup with dedicated inference GPUs, it significantly reduces the hardware requirement." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.786, + 0.916, + 0.839 + ], + "angle": 0, + "content": "Training Budget. The NVIDIA L40S GPUs we use are accessible via commercial cloud platforms at an approximate rate of \\(1 USD per GPU hour, including 300 GB storage, based on pricing observed at the time of writing (Cudo Compute). The RL training process for our LoRA models proved highly efficient, with a" + }, + { + "type": "page_footnote", + "bbox": [ + 0.102, + 0.848, + 0.439, + 0.865 + ], + "angle": 0, + "content": "1https://github.com/huggingface/open-r1" + }, + { + "type": "page_footnote", + "bbox": [ + 0.082, + 0.865, + 0.916, + 0.911 + ], + "angle": 0, + "content": "2Occasionally, NVIDIA RTX 6000 Ada GPUs were used instead, which is reflected in the system configuration metadata on Weights & Biases. From our practical experience, these two GPU types are similar in terms of cost and computational performance. For consistency, we report costs and compute metrics based on the L40S." + }, + { + "type": "list", + "bbox": [ + 0.082, + 0.848, + 0.916, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.903, + 0.948, + 0.913, + 0.959 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.612, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "table", + "bbox": [ + 0.108, + 0.106, + 0.892, + 0.459 + ], + "angle": 0, + "content": "
EXPERIMENTAL TASKTRAINING COST EST.EVALUATION COST EST.TOTAL COST EST.
Baseline: Model Re-Evaluation-$6$6
Main: Tina-STILL-3-1.5B-preview$59$7$66
Main: Tina-DeepScaleR-1.5B-Preview$84$10$94
Main: Tina-Open-RS1$40$11$51
Main: Tina-Open-RS2$15$17$32
Main: Tina-Open-RS3$15$17$32
Ablation: OpenThoughts Dataset$84$10$94
Ablation: OpenR1 Dataset$59$7$66
Ablation: LIMR Dataset$4$4$8
Ablation: DrGRPO Algorithm$15$17$32
Ablation: Learning Rate$7$8$15
Ablation: LoRA Rank/Alpha$14$16$30
Total: All Tasks$396$130$526
Total: Main Tasks$213$62$275
Total: Best Ckpt. in Each Main Task$80$5$85
Total: All Ckpt. in Best-Performance Task$14$17$31
Total: Best Ckpt. in Best-Performance Task$8$1$9
" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.468, + 0.916, + 0.531 + ], + "angle": 0, + "content": "Table 1: Computational cost breakdown. Costs for all experimental tasks in this paper, measured in USD. The row \"Best Ckpt. in Each Main Task\" denotes the cost of reproducing the best checkpoint in each of Table 7, 8, 9, 10, 11. The row \"All Ckpt. in Best-Performance Task\" denotes the cost of reproducing all checkpoints in Table 10. \"Best Ckpt. in Best-Performance Task\" denotes the cost of reproducing the best checkpoint in Table 10, i.e., the checkpoint at step 450." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.545, + 0.918, + 0.667 + ], + "angle": 0, + "content": "single RL step typically completing within one minute on this hardware. Evaluating a model checkpoint across our entire suite of six reasoning benchmarks required approximately 1 L40S GPU hours on average. To ensure cost control, we initially established a conservative maximum budget of \\(100 USD for each complete experimental run, encompassing all stages from training to evaluation and miscellaneous tasks. As detailed in Table 1, our actual expenditures were significantly below this ceiling. Our calculation is based on the full Tina model evaluation performance in Appendix D. We believe this low cost makes our setup an accessible testbed for the research community." + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.685, + 0.696, + 0.706 + ], + "angle": 0, + "content": "4. Surprising Effectiveness of Efficient RL Reasoning via LoRA" + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.72, + 0.553, + 0.739 + ], + "angle": 0, + "content": "4.1. Experiments Stage I: Baseline Model Re-Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.748, + 0.918, + 0.853 + ], + "angle": 0, + "content": "Before presenting Tina's performance, it is crucial to establish fair and reliable comparisons against existing SOTA reasoning models. 
We note that performance scores reported in the literature for relevant models often stem from evaluations using disparate frameworks (e.g., verl (Sheng et al., 2025), lighteval (Fourrier et al., 2023), lm-eval-harness (Gao et al., 2024b)) and inconsistent inference settings (such as different generation hyperparameters or varying numbers of GPUs). These variations can significantly influence reported metrics, creating potential inconsistencies and hindering reliable comparisons between models." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.859, + 0.916, + 0.913 + ], + "angle": 0, + "content": "To mitigate these confounding factors, we performed a comprehensive re-evaluation of key baseline models using a single, consistent methodology throughout this paper. All baseline evaluations reported herein utilize the lighteval framework integrated with the vLLM (Kwon et al., 2023) inference engine for efficient" + }, + { + "type": "page_number", + "bbox": [ + 0.902, + 0.948, + 0.913, + 0.959 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.612, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "table", + "bbox": [ + 0.13, + 0.106, + 0.87, + 0.244 + ], + "angle": 0, + "content": "
BASELINE MODELAIME24AIME25AMC23MATH500GPQAMINervaAvg.
DeepSeek-R1-Distilled-Qwen-1.5B23.3316.6762.5082.6031.8230.1541.18
STILL-3-1.5B-preview26.6726.6767.5086.4034.3427.5744.86
DeepScaleR-1.5B-/Preview36.6726.6777.5087.8031.8231.9948.74
Open-RS126.6720.0072.5083.6035.3528.6844.47
Open-RS226.6713.3362.5085.4034.8526.8441.60
Open-RS343.3320.0067.5083.0033.8428.6846.06
" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.254, + 0.845, + 0.27 + ], + "angle": 0, + "content": "Table 2: Baseline model re-evaluation. Performance evaluation of baseline models on six reasoning tasks." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.286, + 0.915, + 0.372 + ], + "angle": 0, + "content": "generation. For comparability with prior work such as OpenR1, we maintained a fixed hardware configuration (two L40S GPUs) and applied a standardized set of vLLM inference parameters across all evaluated baseline models. All scores are zero-shot pass@1 performance. The exact command structure employed for these evaluations is provided in Appendix C.2 for transparency and reproducibility. The results stemming from this consistent re-evaluation protocol are presented in Table 2." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.38, + 0.915, + 0.415 + ], + "angle": 0, + "content": "Particularly, we evaluate the reasoning capabilities of our Tina models and the baselines across a diverse suite of six challenging benchmarks, primarily focused on mathematical and scientific reasoning:" + }, + { + "type": "text", + "bbox": [ + 0.098, + 0.422, + 0.914, + 0.475 + ], + "angle": 0, + "content": "- AIME24/25 (Art of Problem Solving, 2024) contains 30 high-school-level math problems in algebra, geometry, number theory, and combinatorics from the 2024/2025 American Invitational Mathematics Examination. Each problem demands precise multi-step reasoning." + }, + { + "type": "text", + "bbox": [ + 0.098, + 0.481, + 0.915, + 0.517 + ], + "angle": 0, + "content": "- AMC23 (Art of Problem Solving, 2023) includes 40 problems from the 2023 American Mathematics Competition, offering a mix of logic and symbolic manipulation tasks." 
+ }, + { + "type": "text", + "bbox": [ + 0.098, + 0.522, + 0.915, + 0.573 + ], + "angle": 0, + "content": "- MATH500 (Hendrycks et al., 2021, Lightman et al., 2023) is a benchmark comprising 500 competition mathematics problems derived from various sources, covering different difficulty levels and often necessitating multi-step derivation and calculation." + }, + { + "type": "text", + "bbox": [ + 0.098, + 0.581, + 0.915, + 0.616 + ], + "angle": 0, + "content": "- GPQA Diamond (Rein et al., 2024), hereafter referred to as GPQA, consists of 198 PhD-level science questions across biology, chemistry, and physics. Each question is multiple-choice with subtle distractors." + }, + { + "type": "text", + "bbox": [ + 0.098, + 0.622, + 0.915, + 0.691 + ], + "angle": 0, + "content": "- Minerva (Lewkowycz et al., 2022) includes 272 quantitative reasoning problems generally at the undergraduate level. The questions span multiple STEM fields, including physics, biology, chemistry, and economics, often requiring mathematical modeling or calculation steps. Includes tasks such as calculating enzyme kinetics from reaction data." + }, + { + "type": "list", + "bbox": [ + 0.098, + 0.422, + 0.915, + 0.691 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.71, + 0.498, + 0.726 + ], + "angle": 0, + "content": "4.2. Experiments Stage II: Tina Model Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.737, + 0.915, + 0.824 + ], + "angle": 0, + "content": "We now present the core evaluation results for our Tina models. These experiments assess the reasoning capabilities attained by post-training the DeepSeek-R1-Distill-Qwen-1.5B with minimal parameter updates via LoRA-based RL. 
The results presented in Table 3 demonstrate that significant reasoning performance can be achieved efficiently, yielding models that are competitive with, or outperform, relevant baselines despite the inherent resource constraints of using parameter-efficient tuning.3" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.832, + 0.915, + 0.849 + ], + "angle": 0, + "content": "Table 3 summarizes the performance of five distinct Tina models across a suite of six reasoning tasks:" + }, + { + "type": "page_footnote", + "bbox": [ + 0.082, + 0.859, + 0.915, + 0.903 + ], + "angle": 0, + "content": "3Tables 3 and 4 adopt a consistent naming pattern where \"Tina-X\" denotes our model is the LoRA counterpart of a baseline model X or is trained on a dataset X (possibly followed with an extra ablation setup). This can reflect the model origin and serve as a direct reference to the public checkpoints for reproducibility." + }, + { + "type": "page_number", + "bbox": [ + 0.903, + 0.948, + 0.913, + 0.959 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.388, + 0.069, + 0.61, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.106, + 0.912, + 0.205 + ], + "angle": 0, + "content": "
TINA MODELSTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.BASELINE
Tina-STILL-3-1.5B-preview53%36.6730.0077.5084.6033.3326.8448.1644.86
Tina-DeepScaleR-1.5B-/Preview19%43.3326.6767.5086.2037.8828.6848.3848.74
Tina-Open-RS134%43.3320.0080.0084.0035.3528.6848.5644.47
Tina-Open-RS251%43.3326.6777.5087.0036.3632.7250.6041.60
Tina-Open-RS357%36.6723.3382.5085.2037.3731.6249.4546.06
" + }, + { + "type": "table_caption", + "bbox": [ + 0.086, + 0.215, + 0.915, + 0.275 + ], + "angle": 0, + "content": "Table 3: Tina model evaluation. Performance comparison between Tina models and corresponding full-parameter-trained SOTA models on six reasoning tasks. The value in the Steps column indicates the training steps of the best model checkpoint within one epoch, the full model checkpoint evaluation is shown in Appendix D. The Baseline column represents the average score achieved by baseline model with full-parameter RL in Table 2." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.303, + 0.915, + 0.49 + ], + "angle": 0, + "content": "AIME24/25, AMC23, MATH500, GPQA, and Minerva. For each Tina model, we report the extent of training completed (as a percentage of a predefined training stpes within 1 epoch) and the percentage scores achieved on each task. The results compellingly demonstrate the efficacy of our economical LoRA-based RL strategy. All Tina models exhibit substantial reasoning aptitude, achieving average scores in the range of \\(48.16\\%\\) to \\(50.60\\%\\). Significantly, nearly all Tina models notably outperform their corresponding baseline average scores, indicating marked improvements instilled by the parameter-efficient RL. The Tina-Open-RS2 model yielded the highest average performance observed at \\(50.60\\%\\). Furthermore, these strong results were achieved with remarkably limited training durations, ranging from just \\(19\\%\\) to \\(57\\%\\) of a full training epoch, highlighting the efficiency and rapid adaptation enabled by the Tina approach. These findings strongly support our central hypothesis: robust reasoning capabilities can be effectively and economically cultivated in small language models through the targeted application of LoRA and RL." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.508, + 0.502, + 0.525 + ], + "angle": 0, + "content": "4.3. 
Experiments Stage III: Tina Ablation Variants" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.536, + 0.915, + 0.656 + ], + "angle": 0, + "content": "To better understand the factors influencing the performance and efficiency of our Tina models within the proposed low-cost framework, we conducted a series of ablation studies. These studies systematically investigate the impact of key design choices and hyperparameter: the underlying training dataset, the learning rate for LoRA updates, the rank of the LoRA adapters, and the specific RL algorithm employed. In each study, we typically varied one factor while holding others constant, often based on a high-performing configuration identified in our main experiments or preliminary runs. The results, summarized in Table 4, provide valuable insights into the robustness and sensitivity of our economical approach." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.665, + 0.915, + 0.768 + ], + "angle": 0, + "content": "Impact of Training Dataset. The first section of Table 4 highlights the influence of the dataset used for RL. We compared seven distinct datasets, varying significantly in size (from \\(\\approx 1.4\\mathrm{k}\\) to \\(\\approx 94\\mathrm{k}\\) samples). Strikingly, the Tina-0pen-RS model, trained on a concise dataset of merely 7k examples, achieved the highest average score (50.60%). This outcome surpasses models trained on considerably larger datasets, such as Tina-0penR1 (93.7k samples, 49.26% avg). This observation strongly supports our core \"Tiny\" premise and reflects the intuition that the quality and diversity of the dataset matter more than the data size." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.776, + 0.915, + 0.861 + ], + "angle": 0, + "content": "Sensitivity to Learning Rate. Using the Tina-LIMR configuration as a testbed (second section of Table 4), we assessed sensitivity to the learning rate. 
Among the tested values \\((5 \\times 10^{-6}, 1 \\times 10^{-6}\\), and \\(5 \\times 10^{-7}\\)), a learning rate of \\(1 \\times 10^{-6}\\) yielded the optimal average performance \\((48.47\\%)\\) for this setup. While performance differences were not drastic, this indicates that learning rate selection remains a factor, although effective results were obtained without extensive tuning." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.87, + 0.913, + 0.904 + ], + "angle": 0, + "content": "Effect of LoRA Rank. The third ablation study investigated the impact of LoRA rank, which directly controls the number of trainable parameters. Testing ranks 4, 8, 16, 32, and 64 on the Tina-LIMR setup, we observed" + }, + { + "type": "page_number", + "bbox": [ + 0.903, + 0.949, + 0.912, + 0.957 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.61, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "table", + "bbox": [ + 0.086, + 0.106, + 0.912, + 0.474 + ], + "angle": 0, + "content": "
ABLATION ON DATASETSSTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
Tina-OpenR1 (93.7k)13%36.6726.6775.0086.8039.9030.5149.26
Tina-OpenThoughts (66.1k)30%36.6726.6772.5084.8041.4133.0949.19
Tina-DeepScaleR (40.3k)19%43.3326.6767.5086.2037.8828.6848.38
Tina-STILL-3 (33k)53%36.6730.0077.5084.6033.3326.8448.16
Tina-Open-S1 (18.6k)34%43.3320.0080.0084.0035.3528.6848.56
Tina-Open-RS (7k)51%43.3326.6777.5087.0036.3632.7250.60
Tina-LIMR (1.39k)58%46.6720.0075.0083.8034.8530.5148.47
ABLATION ON LEARNING RATESTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
Tina-LIMR-5e-6-lr29%36.6726.6775.0083.6035.8629.4147.87
Tina-LIMR-1e-6-lr58%46.6720.0075.0083.8034.8530.5148.47
Tina-LIMR-5e-7-lr58%43.3316.6777.5084.6034.8530.5147.91
ABLATION ON LORA RANKSTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
Tina-LIMR-64-LoRA-rank29%20.0030.0077.5084.2038.3831.6246.95
Tina-LIMR-32-LoRA-rank58%46.6720.0075.0083.8034.8530.5148.47
Tina-LIMR-16-LoRA-rank58%43.3333.3370.0083.2035.3528.3148.92
Tina-LIMR-8-LoRA-rank29%30.0026.6782.5083.8033.8430.5147.89
Tina-LIMR-4-LoRA-rank86%36.6720.0085.0083.8031.8229.0447.72
ABLATION ON RL ALGORITHMSTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
Tina-Open-RS3-GRPO57%36.6723.3382.5085.2037.3731.6249.45
Tina-Open-RS3-DrGRPO17%43.3323.3380.0085.0035.3530.1549.53
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.082, + 0.484, + 0.916, + 0.622 + ], + "angle": 0, + "content": "Table 4: Tina ablation variants evaluation. Performance evaluation of Tina's ablation variants on six reasoning tasks. The value in the Steps column indicates the training steps of the best model checkpoint within one epoch, the full model checkpoint evaluation is shown in Appendix D. For the number in parentheses (the ablation on datasets), it means the data size of a dataset. During training, this number should be multiplied by the number of generation in GRPO-like algorithm (in our case, that multiplier is 4). For the model names, Tina-LIMR, Tina-LIMR-1e-6-1r and Tina-LIMR-32-LoRA-rank are the same model, we duplicate them for better visualization. The same idea applies to Tina-DeepScaleR and Tina-DeepScaleR-1.5B-Preview, Tina-STILL-3 and Tina-STILL-3-1.5B-preview, Tina-Open-S1 and Tina-Open-RS1, Tina-Open-RS and Tina-Open-RS2, Tina-Open-RS3-GRPO and Tina-Open-RS3." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.636, + 0.918, + 0.723 + ], + "angle": 0, + "content": "considerable robustness. Ranks 8, 16, and 32 all produced strong results, with average scores clustering between \\(47.89\\%\\) and \\(48.92\\%\\). Notably, rank 16 achieved the peak performance \\((48.92\\%)\\) in this comparison, slightly outperforming rank 32 \\((48.47\\%)\\). Performance decreased slightly at the extremes (rank 4 and 64). This study validates that highly parameter-efficient configurations (low ranks like 16 or 32) are effective, further enhancing the cost-effectiveness and minimal overhead of the Tina approach." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.73, + 0.918, + 0.834 + ], + "angle": 0, + "content": "Comparison of RL Algorithms. Finally, we compared two RL algorithms, GRPO and Dr.GRPO (Liu et al., 2025), using the Tina-Open-RS3 setup (final section of Table 4). 
Both algorithms led to similar peak average performance levels (49.45% for GRPO vs. 49.53% for Dr.GRPO). However, Dr.GRPO reached its best checkpoint significantly earlier in the training process (17% of an epoch vs. 57% for GRPO). This suggests potential advantages in sample efficiency for Dr.GRPO in this context with an alternative normalization in loss calculation, offering potentially faster convergence and further reductions in training time and cost." + }, + { + "type": "page_number", + "bbox": [ + 0.902, + 0.948, + 0.913, + 0.959 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.388, + 0.069, + 0.612, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "title", + "bbox": [ + 0.088, + 0.106, + 0.796, + 0.125 + ], + "angle": 0, + "content": "5. Hypothesis for Effective and Efficient LoRA: Rapid Format Adaptation" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.14, + 0.915, + 0.276 + ], + "angle": 0, + "content": "Less is More LoRA-based RL. To understand why LoRA facilitates both effective and efficient reasoning improvements via RL, we analyze the relationship between training compute and performance, alongside training dynamics. As illustrated in Figure 3, plotting reasoning performance against approximate training FLOPs reveals a stark contrast between full-parameter and LoRA-based training regimes. First, our LoRA-based Tina models achieve reasoning scores comparable or superior to fully fine-tuned baselines while requiring (in some cases) orders of magnitude fewer training FLOPs. We observe that in LoRA models, increased training compute inversely affects performance, in contrast to full-parameter models. This observation highlights a \"less compute can yield more performance\" phenomenon." 
+ }, + { + "type": "image", + "bbox": [ + 0.088, + 0.292, + 0.909, + 0.519 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.088, + 0.533, + 0.913, + 0.564 + ], + "angle": 0, + "content": "Figure 3: Less is more LoRA-based RL. Approximate training FLOPs vs reasoning performance comparison between Tina and baseline models. The calculation is detailed in Appendix A." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.58, + 0.915, + 0.751 + ], + "angle": 0, + "content": "This finding supports our hypothesis regarding how LoRA achieves such remarkable efficiency, which relates to the principle of \"learn structure/format, maintain knowledge.\" We posit that LoRA excels in this scenario because RL for reasoning heavily rewards the model's ability to generate outputs in a specific, verifiable format or structure (e.g., step-by-step reasoning chains). LoRA appears to be highly adept at learning these structural and stylistic patterns with minimal parameter changes, thus requiring very few FLOPs. At the same time, because LoRA modifies only a tiny fraction of the weights, it largely preserves the base model's vast pre-trained knowledge. Therefore, LoRA efficiently teaches the model how to format its existing knowledge into effective reasoning traces, rather than potentially imposing costly relearning of concepts or procedures that extensive full-parameter updates might entail. We hypothesize that this focus on structural adaptation allows Tina to achieve high reasoning performance with minimal computational investment." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.76, + 0.915, + 0.913 + ], + "angle": 0, + "content": "Phase Transition in LoRA-based RL. Further insights into the LoRA-based RL mechanism arise from analyzing the training logs. That is, a distinct pattern emerges in Figure 4, which displays accuracy rewards, format rewards, and completion lengths over training steps for various Tina model runs. 
We consistently observe a training phase transition or turning point evident in the format-related metrics (format reward, row 2; completion length, row 3) across most Tina models. Around this transition point (indicated by the green vertical dashed line), the format reward often peaks or destabilizes, while the completion length frequently reaches a minimum before potentially reversing its trend. Notably, this relatively sharp transition observed in format and length metrics does not typically have a corresponding distinct turning point in the accuracy reward plots (row 1). The accuracy reward often exhibits more gradual fluctuations or slower drift over the" + }, + { + "type": "page_number", + "bbox": [ + 0.897, + 0.949, + 0.912, + 0.957 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.612, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.108, + 0.695, + 0.125 + ], + "angle": 0, + "content": "training duration, without a clear inflection aligned with the format transition." + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.139, + 0.493, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.138, + 0.874, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.307, + 0.493, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.308, + 0.874, + 0.472 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.126, + 0.476, + 0.493, + 0.64 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.476, + 0.874, + 0.64 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.081, + 0.654, + 0.916, + 0.716 + ], + "angle": 0, + "content": "Figure 4: Phase transition in LoRA-based RL. 
The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1. The \"training turning point\" in the legend means the step where the format-like metrics (e.g., format reward, completion length) start to destabilize. Refer to Appendix E for the full set of plots." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.732, + 0.917, + 0.906 + ], + "angle": 0, + "content": "Another crucial observation is the timing of optimal performance: the best-performing checkpoint, yielding the highest reasoning accuracy on held-out evaluations, consistently occurs just prior to or around this observed phase transition point in the format metrics (indicated by the red vertical dashed line). This decoupling between the dynamics of accuracy-based and format-based metrics suggests that the LoRA-based RL process rapidly optimizes the model's ability to adhere to the structural and stylistic elements rewarded by the format score and length constraints. The subsequent transition point may signify where this structural optimization saturates, becomes unstable, or perhaps begins to compromise generative quality in other ways (e.g., by overly constraining or expanding length). The fact that peak reasoning accuracy is achieved just before this format-driven transition implies that while learning the correct output format is essential and efficiently achieved via LoRA, pushing further on format-centric optimization alone does not necessarily" + }, + { + "type": "page_number", + "bbox": [ + 0.896, + 0.948, + 0.912, + 0.959 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.388, + 0.069, + 0.612, + 0.082 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.108, + 0.913, + 0.142 + ], + "angle": 0, + "content": "yield better reasoning, and may even be detrimental. 
This reinforces our hypothesis that LoRA efficiently adapts the model by primarily learning the form required for effective reasoning." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.17, + 0.225, + 0.187 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.203, + 0.913, + 0.322 + ], + "angle": 0, + "content": "We presented Tina to demonstrate that effective reasoning capabilities can be instilled in language models with efficiency and effectiveness. The principal contribution of Tina lies in democratizing access to RL-driven reasoning model development. By combining LoRA with RL on a 1.5B parameter base model, we achieved reasoning performance competitive with significantly larger models, accomplishing this within an estimated computational budget of only $9. This outcome prompts reflection on the factors enabling such minimalist approaches, and on their possible future trajectories. Despite encouraging results, this work is subject to certain limitations:" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.331, + 0.913, + 0.381 + ], + "angle": 0, + "content": "Base Model Scale: Our experiments centered on a 1.5B parameter model. While showcasing cost-performance efficiency, the absolute reasoning ceiling achievable with this \"tiny\" model may naturally be lower for complex, multi-step reasoning problems than what larger models can offer." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.391, + 0.913, + 0.442 + ], + "angle": 0, + "content": "Reasoning Task Scope: Our evaluation focused primarily on mathematical and formal logic reasoning benchmarks (AIME, AMC, MATH, GPQA, Minerva). The effectiveness and transferability of the learned reasoning skills to other domains, such as coding, warrants further investigation." 
+ }, + { + "type": "text", + "bbox": [ + 0.086, + 0.451, + 0.913, + 0.519 + ], + "angle": 0, + "content": "Hyperparameter Optimization: We intentionally minimized hyperparameter tuning costs by adopting established configurations. While this demonstrates a certain form of robustness to our methodology, there may be potential for further performance gains derived from additional tuning, perhaps tailored to the interplay between LoRA, the RL algorithm, and the target reasoning tasks." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.547, + 0.286, + 0.565 + ], + "angle": 0, + "content": "7. Acknowledgment" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.58, + 0.913, + 0.733 + ], + "angle": 0, + "content": "We want to express our gratitude to the broader open-source community. This research was made possible by leveraging numerous publicly available resources, including training and evaluation framework, open datasets, accessible pre-trained language models, and the insights shared through technical reports. The computational resources required for the experiments described herein were provided by the Center for Advanced Research Computing (CARC) at the University of Southern California (USC). We are grateful for the support which enabled the training and evaluation of our models. J.A. was supported by the National Science Foundation Graduate Research Fellowship Program under Grant No. DGE-1842487. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the authors and do not necessarily reflect the views of the National Science Foundation." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.897, + 0.948, + 0.913, + 0.957 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.385, + 0.069, + 0.612, + 0.082 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.107, + 0.195, + 0.123 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.14, + 0.916, + 0.175 + ], + "angle": 0, + "content": "Pranjal Aggarwal and Sean Welleck. L1: Controlling how long a reasoning model thinks with reinforcement learning, 2025. URL https://arxiv.org/abs/2503.04697." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.185, + 0.915, + 0.22 + ], + "angle": 0, + "content": "Zeyuan Allen-Zhu and Yuanzhi Li. Physics of language models: Part 3.3, knowledge capacity scaling laws. In Proceedings of International Conference on Learning Representations (ICLR), 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.231, + 0.915, + 0.266 + ], + "angle": 0, + "content": "Art of Problem Solving. Amc problems and solutions, 2023. URL https://artofproblemsolving.com/wiki/index.php/AMC_12_Problems_and_Solutions." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.276, + 0.915, + 0.311 + ], + "angle": 0, + "content": "Art of Problem Solving. Aime problems and solutions, February 2024. URL https://artofproblemsolving.com/wiki/index.php/AIME_Problems_and_Solutions." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.322, + 0.915, + 0.374 + ], + "angle": 0, + "content": "Yinlam Chow, Guy Tennenholtz, Izzeddin Gur, Vincent Zhuang, Bo Dai, Sridhar Thiagarajan, Craig Boutilier, Rishabh Agarwal, Aviral Kumar, and Aleksandra Faust. Inference-aware fine-tuning for Best-of-N sampling in large language models, 2024. URL https://arxiv.org/abs/2412.15287." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.385, + 0.915, + 0.418 + ], + "angle": 0, + "content": "Cudo Compute. 
Nvidia L40S pricing. URL https://www.cudocompute.com/products/gpu-cloud/nvidia-l40s. Accessed: 2025-04-21." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.43, + 0.913, + 0.499 + ], + "angle": 0, + "content": "Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, Jiarui Yuan, Huayu Chen, Kaiyan Zhang, Xingtai Lv, Shuo Wang, Yuan Yao, Xu Han, Hao Peng, Yu Cheng, Zhiyuan Liu, Maosong Sun, Bowen Zhou, and Ning Ding. Process reinforcement through implicit rewards, 2025. URL https://arxiv.org/abs/2502.01456." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.51, + 0.913, + 0.544 + ], + "angle": 0, + "content": "Quy-Anh Dang and Chris Ngo. Reinforcement learning for reasoning in small llms: What works and what doesn't, 2025. URL https://arxiv.org/abs/2503.16219." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.556, + 0.915, + 0.59 + ], + "angle": 0, + "content": "DeepSeek-AI. DeepSeek-R1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.601, + 0.915, + 0.653 + ], + "angle": 0, + "content": "Clémentine Fourrier, Nathan Habib, Hynek Kydlíček, Thomas Wolf, and Lewis Tunstall. Lighteval: A lightweight framework for llm evaluation, 2023. URL https://github.com/huggingface/lighteval." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.664, + 0.915, + 0.748 + ], + "angle": 0, + "content": "Bofei Gao, Feifan Song, Zhe Yang, Zefan Cai, Yibo Miao, Qingxiu Dong, Lei Li, Chenghao Ma, Liang Chen, Runxin Xu, Zhengyang Tang, Benyou Wang, Daoguang Zan, Shanghaoran Quan, Ge Zhang, Lei Sha, Yichang Zhang, Xuancheng Ren, Tianyu Liu, and Baobao Chang. Omni-MATH: A universal olympiad level mathematic benchmark for large language models, 2024a. URL https://arxiv.org/abs/2410.07985." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.761, + 0.915, + 0.846 + ], + "angle": 0, + "content": "Leo Gao, Jonathan Tow, Baber Abbasi, Stella Biderman, Sid Black, Anthony DiPofi, Charles Foster, Laurence Golding, Jeffrey Hsu, Alain Le Noac'h, Haonan Li, Kyle McDonell, Niklas Muennighoff, Chris Ociepa, Jason Phang, Laria Reynolds, Hailey Schoelkopf, Aviya Skowron, Lintang Sutawika, Eric Tang, Anish Thite, Ben Wang, Kevin Wang, and Andy Zou. A framework for few-shot language model evaluation, 07 2024b. URL https://zenodo.org/records/12608602." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.857, + 0.913, + 0.909 + ], + "angle": 0, + "content": "Sylvain Gugger, Lysandre Debut, Thomas Wolf, Philipp Schmid, Zachary Mueller, Sourab Mangrulkar, Marc Sun, and Benjamin Bossan. Accelerate: Training and inference at scale made simple, efficient and adaptable., 2022. URL https://github.com/huggingface/accelerate." + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.14, + 0.916, + 0.909 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.897, + 0.948, + 0.913, + 0.959 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.385, + 0.069, + 0.612, + 0.082 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.108, + 0.915, + 0.142 + ], + "angle": 0, + "content": "Zeyu Han, Chao Gao, Jinyang Liu, Jeff Zhang, and Sai Qian Zhang. Parameter-efficient fine-tuning for large models: A comprehensive survey, 2024. URL https://arxiv.org/abs/2403.14608." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.152, + 0.916, + 0.204 + ], + "angle": 0, + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset, 2021. URL https://arxiv.org/abs/2103.03874." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.214, + 0.916, + 0.263 + ], + "angle": 0, + "content": "Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuzhhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models, 2021. URL https://arxiv.org/abs/2106.09685." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.274, + 0.916, + 0.327 + ], + "angle": 0, + "content": "Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, and Heung-Yeung Shum Xiangyu Zhang. Open-Reasoner-Zero: An open source approach to scaling reinforcement learning on the base model, 2025. URL https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.336, + 0.916, + 0.389 + ], + "angle": 0, + "content": "Zhen Huang, Haoyang Zou, Xuefeng Li, Yixiu Liu, Yuxiang Zheng, Ethan Chern, Shijie Xia, Yiwei Qin, Weizhe Yuan, and Pengfei Liu. O1 replication journey - part 2: Surpassing o1-preview through simple distillation, big progress or bitter lesson?, 2024. URL https://arxiv.org/abs/2411.16489." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.398, + 0.916, + 0.433 + ], + "angle": 0, + "content": "Hugging Face. Open r1: A fully open reproduction of deepseek-r1, January 2025. URL https://github.com/huggingface/open-r1." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.442, + 0.916, + 0.493 + ], + "angle": 0, + "content": "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of Symposium on Operating Systems Principles (SOSP), 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.503, + 0.916, + 0.59 + ], + "angle": 0, + "content": "Nathan Lambert, Jacob Morrison, Valentina Pyatkin, Shengyi Huang, Hamish Ivison, Faeze Brahman, Lester James V. 
Miranda, Alisa Liu, Nouha Dziri, Shane Lyu, Yuling Gu, Saumya Malik, Victoria Graf, Jena D. Hwang, Jiangjiang Yang, Ronan Le Bras, Oyvind Tafjord, Chris Wilhelm, Luca Soldaini, Noah A. Smith, Yizhong Wang, Pradeep Dasigi, and Hannaneh Hajishirzi. Tulu 3: Pushing frontiers in open language model post-training, 2025. URL https://arxiv.org/abs/2411.15124." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.6, + 0.916, + 0.669 + ], + "angle": 0, + "content": "Aitor Lewkowycz, Anders Andreassen, David Dohan, Ethan Dyer, Henryk Michalewski, Vinay Ramasesh, Ambrose Slone, Cem Anil, Imanol Schlag, Theo Gutman-Solo, Yuhuai Wu, Behnam Neyshabur, Guy Gur-Ari, and Vedant Misra. Solving quantitative reasoning problems with language models. In Proceedings of Advances in Neural Information Processing Systems (NeurIPS), volume 35, pages 3843-3857, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.678, + 0.916, + 0.73 + ], + "angle": 0, + "content": "Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. NuminaMath, 2024. URL https://huggingface.co/AI-MO/NuminaMath-CoT." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.739, + 0.916, + 0.792 + ], + "angle": 0, + "content": "Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In Proceedings of International Conference on Learning Representations (ICLR), 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.801, + 0.916, + 0.851 + ], + "angle": 0, + "content": "Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Understanding r1-zero-like training: A critical perspective, 2025. URL https://arxiv.org/abs/2503.20783." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.862, + 0.916, + 0.915 + ], + "angle": 0, + "content": "Michael Luo, Sijun Tan, Justin Wong, Xiaoxiang Shi, William Y. Tang, Manan Roongta, Colin Cai, Jeffrey Luo, Tianjun Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. DeepScaleR: Surpassing o1-preview with a 1.5b model by scaling rl, 2025. URL https://agentica-project.com/." + }, + { + "type": "list", + "bbox": [ + 0.085, + 0.108, + 0.916, + 0.915 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.897, + 0.949, + 0.913, + 0.958 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.385, + 0.069, + 0.612, + 0.082 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.108, + 0.913, + 0.142 + ], + "angle": 0, + "content": "Trung Quoc Luong, Xinbo Zhang, Zhanming Jie, Peng Sun, Xiaoran Jin, and Hang Li. ReFT: Reasoning with reinforced fine-tuning, 2024. URL https://arxiv.org/abs/2401.08967." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.153, + 0.917, + 0.223 + ], + "angle": 0, + "content": "Chengqi Lyu, Songyang Gao, Yuzhe Gu, Wenwei Zhang, Jianfei Gao, Kuikun Liu, Ziyi Wang, Shuaibin Li, Qian Zhao, Haian Huang, Weihan Cao, Jiangning Liu, Hongwei Liu, Junnan Liu, Songyang Zhang, Dahua Lin, and Kai Chen. Exploring the limit of outcome reward for learning mathematical reasoning, 2025. URL https://arxiv.org/abs/2502.06781." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.233, + 0.916, + 0.285 + ], + "angle": 0, + "content": "Sourab Mangrulkar, Sylvain Gugger, Lysandre Debut, Younes Belkada, Sayak Paul, and Benjamin Bossan. PEFT: State-of-the-art parameter-efficient fine-tuning methods, 2022. URL https://github.com/huggingface/peft." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.295, + 0.916, + 0.364 + ], + "angle": 0, + "content": "Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, Wayne Xin Zhao, Zheng Liu, Zhongyuan Wang, and Ji-Rong Wen. Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems, 2024. URL https://arxiv.org/abs/2412.09413." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.375, + 0.916, + 0.427 + ], + "angle": 0, + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.438, + 0.916, + 0.473 + ], + "angle": 0, + "content": "NovaSky Team. Sky-T1: Train your own o1 preview model within $450, 2025. URL https://novasky-ai.github.io/posts/sky-t1." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.484, + 0.773, + 0.502 + ], + "angle": 0, + "content": "OpenAI. OpenAI o1 system card, 2024. URL https://arxiv.org/abs/2412.16720." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.512, + 0.801, + 0.53 + ], + "angle": 0, + "content": "OpenThoughts Team. Open Thoughts, January 2025. URL https://open-thoughts.ai." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.54, + 0.913, + 0.592 + ], + "angle": 0, + "content": "Debjit Paul, Mete Ismayilzada, Maxime Peyrard, Beatrix Borges, Antoine Bosselut, Robert West, and Boi Faltings. REFINER: Reasoning feedback on intermediate representations. In Proceedings of European Chapter of the ACL (EACL), pages 1100-1126, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.603, + 0.913, + 0.638 + ], + "angle": 0, + "content": "Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. 
Mutual reasoning makes smaller LLMs stronger problem-solvers, 2024. URL https://arxiv.org/abs/2408.06195." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.649, + 0.916, + 0.699 + ], + "angle": 0, + "content": "Samyam Rajbhandari, Jeff Rasley, Olatunj Ruwase, and Yuxiong He. Zero: Memory optimization towards training A trillion parameter models. CoRR, abs/1910.02054, 2019. URL http://arxiv.org/abs/1910.02054." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.711, + 0.916, + 0.763 + ], + "angle": 0, + "content": "David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R Bowman. GPQA: A graduate-level google-proof Q&A benchmark. In Proceedings of Conference on Language Modeling (COLM), 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.773, + 0.916, + 0.81 + ], + "angle": 0, + "content": "RUCAIBox STILL Team. STILL-3-1.5B-preview: Enhancing slow thinking abilities of small models through reinforcement learning. 2025. URL https://github.com/RUCAIBox/Slow_Thinking_with_LLMs." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.819, + 0.916, + 0.872 + ], + "angle": 0, + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, Y. K. Li, Y. Wu, and Daya Guo. DeepSeekMath: Pushing the limits of mathematical reasoning in open language models, 2024. URL https://arxiv.org/abs/2402.03300." 
+ }, + { + "type": "list", + "bbox": [ + 0.085, + 0.108, + 0.917, + 0.872 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.897, + 0.948, + 0.913, + 0.959 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.385, + 0.069, + 0.612, + 0.082 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.108, + 0.915, + 0.177 + ], + "angle": 0, + "content": "Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. Hybridflow: A flexible and efficient rlhf framework. In Proceedings of European Conference on Computer Systems (EuroSys), EuroSys '25, page 1279-1297. ACM, March 2025. doi: 10.1145/3689031.3696075. URL http://dx.doi.org/10.1145/3689031.3696075." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.187, + 0.918, + 0.24 + ], + "angle": 0, + "content": "Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. Trl: Transformer reinforcement learning, 2020. URL https://github.com/huggingface/trl." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.25, + 0.916, + 0.285 + ], + "angle": 0, + "content": "Shangshang Wang and Willie Neiswanger. LLM reasoning: Curated insights, 2025. URL https://shangshangwang.notion.site/llm-reasoning." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.295, + 0.915, + 0.348 + ], + "angle": 0, + "content": "Zhenyu Wu, Qingkai Zeng, Zhihan Zhang, Zhaoxuan Tan, Chao Shen, and Meng Jiang. Large language models can self-correct with key condition verification. In Proceedings of Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 12846-12867, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.358, + 0.916, + 0.429 + ], + "angle": 0, + "content": "Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, Chenyang Shao, Yuwei Yan, Qinglong Yang, Yiwen Song, Sijian Ren, Xinyuan Hu, Yu Li, Jie Feng, Chen Gao, and Yong Li. Towards large reasoning models: A survey of reinforced reasoning with large language models, 2025. URL https://arxiv.org/abs/2501.09686." + }, + { + "type": "ref_text", + "bbox": [ + 0.085, + 0.438, + 0.916, + 0.491 + ], + "angle": 0, + "content": "Weihao Zeng, Yuzhen Huang, Qian Liu, Wei Liu, Keqing He, Zejun Ma, and Junxian He. SimpleRL-Zoo: Investigating and taming zero reinforcement learning for open base models in the wild, 2025. URL https://arxiv.org/abs/2503.18892." + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.108, + 0.918, + 0.491 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.897, + 0.948, + 0.913, + 0.959 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.612, + 0.082 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "title", + "bbox": [ + 0.443, + 0.106, + 0.543, + 0.127 + ], + "angle": 0, + "content": "Appendix" + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.15, + 0.278, + 0.168 + ], + "angle": 0, + "content": "A. Cost Breakdown" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.183, + 0.918, + 0.234 + ], + "angle": 0, + "content": "This section provides further details on how training data amounts, computational cost, time cost, and performance metrics reported in this paper – particularly those presented in figures like Figures 1 and 3 – were determined and should be interpreted." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.243, + 0.915, + 0.328 + ], + "angle": 0, + "content": "Overall Comparison (Figure 1). 
For the baseline models included in Figure 1, the approximate training data amounts, computational costs (typically reported as GPU hours or total FLOPs), and training times are sourced from their respective technical reports or publications, leveraging the helpful summary provided in the Open-RS paper (Dang and Ngo, 2025). Reasoning performance scores for all models, encompassing both baselines and our Tina models, stem from results presented in Tables 2 and 3." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.337, + 0.546, + 0.354 + ], + "angle": 0, + "content": "Also, it is crucial to understand the scope of reported costs:" + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.363, + 0.911, + 0.412 + ], + "angle": 0, + "content": "- Epoch vs. Best Checkpoint: Costs cited for Tina and baseline models reflect the resources needed to complete a full training epoch or a predefined training run, not necessarily the minimal cost to reach the single best-performing checkpoint within that run." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.421, + 0.912, + 0.453 + ], + "angle": 0, + "content": "- Training vs. Evaluation: Reported costs cover training only, omitting the computational expense required for model evaluation across benchmarks since such information is missing from several baseline models." + }, + { + "type": "list", + "bbox": [ + 0.097, + 0.363, + 0.912, + 0.453 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.464, + 0.915, + 0.549 + ], + "angle": 0, + "content": "Particularly, the \\(9 USD in the abstract represents the estimated cost to train the Tina model up to its best-performing checkpoint and subsequently evaluate that specific checkpoint. For context comparing potential full training runs, the cost to train a Tina model for a complete epoch is \\)14 USD (training only). Including evaluation costs for such a full run would increase the total to approximately $31 USD. 
We emphasize the $9 as representing the efficient path to the best Tina model." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.558, + 0.915, + 0.627 + ], + "angle": 0, + "content": "FLOPs Estimation (Figure 3). The approximate training FLOPs shown in Figure 3 serve as a hardware-agnostic measure of computational work. For both Tina and baseline models, these values were estimated based on reported training durations and hardware configurations sourced from technical reports or the Open-RS summary, using standard FLOPs calculation methodologies." + }, + { + "type": "page_number", + "bbox": [ + 0.897, + 0.948, + 0.912, + 0.957 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.385, + 0.069, + 0.612, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.106, + 0.45, + 0.126 + ], + "angle": 0, + "content": "B. Background behind Tina Training" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.141, + 0.285, + 0.158 + ], + "angle": 0, + "content": "B.1. 
GRPO Formulation" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.169, + 0.915, + 0.206 + ], + "angle": 0, + "content": "Recall the following formulation of GRPO: For each question \\( q \\), GRPO samples a group \\( G = \\{o_1, o_2, \\ldots, o_G\\} \\) of outputs from the old policy \\( \\pi_{\\theta_{\\mathrm{old}}} \\) and optimizes the policy \\( \\pi_{\\theta} \\) by maximizing the following objective:" + }, + { + "type": "equation", + "bbox": [ + 0.11, + 0.217, + 0.887, + 0.277 + ], + "angle": 0, + "content": "\\[\n\\underset { \\begin{array}{c} q \\sim P (Q), \\\\ \\{o _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta_ {\\mathrm {o l d}}} (O | q) \\end{array} } {\\mathbb {E}} \\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\left(\\min \\left(\\frac {\\pi_ {\\theta} (o _ {i} | q)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} (o _ {i} | q)} A _ {i}, \\operatorname {c l i p p e d} \\left(\\frac {\\pi_ {\\theta} (o _ {i} | q)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} (o _ {i} | q)}, 1 - \\epsilon , 1 + \\epsilon\\right) A _ {i}\\right) - \\beta \\mathbb {D} _ {\\mathrm {K L}} (\\pi_ {\\theta} | | \\pi_ {\\mathrm {r e f}})\\right) \\right].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.288, + 0.722, + 0.306 + ], + "angle": 0, + "content": "Here \\(A_{i}\\) denotes the advantage computed from a group of rewards \\(\\{r_1,r_2,\\dots ,r_G\\}\\)" + }, + { + "type": "equation", + "bbox": [ + 0.371, + 0.316, + 0.627, + 0.353 + ], + "angle": 0, + "content": "\\[\nA _ {i} = \\frac {r _ {i} - \\mathrm {m e a n} (\\{r _ {1} , r _ {2} , \\ldots , r _ {G} \\})}{\\mathrm {s t d} (\\{r _ {1} , r _ {2} , \\ldots , r _ {G} \\})},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.364, + 0.12, + 0.38 + ], + "angle": 0, + "content": "and" + }, + { + "type": "equation", + "bbox": [ + 0.314, + 0.375, + 0.685, + 0.413 + ], + "angle": 0, + "content": "\\[\n\\mathbb {D} _ {\\mathrm {K L}} (\\pi_ {\\theta} | | \\pi_ {\\mathrm {r e f}}) = \\frac {\\pi_ {\\mathrm {r e f}} (o _ {i} | 
q)}{\\pi_ {\\theta} (o _ {i} | q)} - \\log \\frac {\\pi_ {\\mathrm {r e f}} (o _ {i} | q)}{\\pi_ {\\theta} (o _ {i} | q)} - 1.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.419, + 0.805, + 0.436 + ], + "angle": 0, + "content": "Note that \\(\\epsilon\\) and \\(\\beta\\) are parameters controlling the clipping range and KL penalty, respectively." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.461, + 0.28, + 0.477 + ], + "angle": 0, + "content": "B.2. LoRA Formulation" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.491, + 0.915, + 0.547 + ], + "angle": 0, + "content": "We follow the standard LoRA setup (Hu et al., 2021). Given a frozen pretrained weight matrix \\( W_0 \\in \\mathbb{R}^{d \\times k} \\) and trainable low-rank matrices \\( A \\in \\mathbb{R}^{d \\times r} \\) and \\( B \\in \\mathbb{R}^{r \\times k} \\) with \\( r \\ll \\min(d, k) \\), the original forward pass \\( h(x) = W_0 x \\) is modified as" + }, + { + "type": "equation", + "bbox": [ + 0.42, + 0.557, + 0.576, + 0.578 + ], + "angle": 0, + "content": "\\[\n\\hat {h} (x) = W _ {0} x + A B x.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.59, + 0.842, + 0.609 + ], + "angle": 0, + "content": "We use the default LoRA implementation provided in the PEFT (Mangrulkar et al., 2022) library." + }, + { + "type": "page_number", + "bbox": [ + 0.897, + 0.948, + 0.913, + 0.958 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.612, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.106, + 0.433, + 0.126 + ], + "angle": 0, + "content": "C. Additional Experimental Details" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.141, + 0.275, + 0.159 + ], + "angle": 0, + "content": "C.1. 
Hyperparameters" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.169, + 0.838, + 0.187 + ], + "angle": 0, + "content": "We show our default choice of hyperparameter in Table 5 for all the LoRA-based RL experiments." + }, + { + "type": "table", + "bbox": [ + 0.283, + 0.198, + 0.712, + 0.633 + ], + "angle": 0, + "content": "
Tina-STILL-3-1.5B-previewLoRA
Tina-DeepScaleR-1.5B-PreviewLoRA
Tina-Open-RS{X}-{Y}LoRA
Tina-LIMR-{Z}LoRA
Tina-OpenR1LoRA
Tina-OpenThoughtsLoRA
LoRA Modulesquery, key, value, dense
LoRA Rank32
LoRA α128
LoRA Dropout0.05
AlgorithmGRPO
OptimizerAdamW
Optimizer Momentumβ1, β2 = 0.9, 0.999
Learning Rate1e-6
LR SchedulerCosine with Min LR
Warmup Ratio0.1
PrecisionBF16-mixed
Gradient Accumulation Step4
Total Train Batch Size32
Epochs1
Hardware2 × NVIDIA L40S
Max Prompt Length512
Max Completion Length3584
Number of Generation4
Vllm GPU Memory Utilization0.4
Vllm Max Model Length4608
" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.654, + 0.397, + 0.67 + ], + "angle": 0, + "content": "Table 5: Common hyperparameter settings." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.687, + 0.918, + 0.74 + ], + "angle": 0, + "content": "We also show the varied hyperparameter in Table 6 for all the LoRA-based RL experiments. Particularly, all the reward types including Accuracy, Format, Length, Cosine, Tag Count, Reasoning Steps, Repetition Penalty, are defined and implemented by the OpenR1 code repository.4" + }, + { + "type": "page_footnote", + "bbox": [ + 0.103, + 0.897, + 0.38, + 0.914 + ], + "angle": 0, + "content": "4https://github.com/huggingface/open-r1" + }, + { + "type": "page_number", + "bbox": [ + 0.896, + 0.948, + 0.913, + 0.959 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.388, + 0.07, + 0.611, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "table", + "bbox": [ + 0.225, + 0.091, + 0.714, + 0.909 + ], + "angle": 270, + "content": "
ModelLoRA RankLoRA AlphaLoRA DropoutAlgorithmLearning RateReward TypeReward Weights
Tina-STILL-3-1.5B-preview-----Accuracy, Length2, 1
Tina-DeepScaleR-1.5B-Preview-----Accuracy, Format2, 1
Tina-Open-RS3-----Cosine, Format2, 1
Tina-Open-RS3-DrGRPO---DrGRPO-Cosine, Format2, 1
Tina-Open-RS2-----Accuracy, Format2, 1
Tina-Open-RS1-----Accuracy, Format2, 1
Tina-LIMR-----Accuracy, Format2, 1
Tina-LIMR-5e-6-lr----5e-6Accuracy, Format2, 1
Tina-LIMR-5e-7-lr----5e-7Accuracy, Format2, 1
Tina-LIMR-64-LoRA-rank64256---Accuracy, Format2, 1
Tina-LIMR-16-LoRA-rank1664---Accuracy, Format2, 1
Tina-LIMR-8-LoRA-rank832---Accuracy, Format2, 1
Tina-LIMR-4-LoRA-rank416---Accuracy, Format2, 1
Accuracy, Cosine, Format, Length, Tag Count, Reasoning Steps, Repetition Penalty1, 1, 1, 1, 1, 1
Tina-OpenR1-----Accuracy, Cosine, Format, Length, Tag Count, Reasoning Steps, Repetition Penalty1, 1, 1, 1, 1, 1
Tina-OpenThoughts-----
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.744, + 0.325, + 0.764, + 0.911 + ], + "angle": 270, + "content": "Table 6: Varied hyperparameter settings where “-” means unchanged from the common settings in Table 5." + }, + { + "type": "page_number", + "bbox": [ + 0.897, + 0.949, + 0.912, + 0.958 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.385, + 0.069, + 0.61, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.109, + 0.306, + 0.124 + ], + "angle": 0, + "content": "C.2. Evaluation Command" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.136, + 0.915, + 0.224 + ], + "angle": 0, + "content": "The following is the evaluation command we use to combine lighteval and vLLM for performance evaluation on reasoning tasks. The MODEL_PATH should be replaced with either the local path or huggingface identifier to the model to be evaluated. TASK should be one of the six reasoning tasks including aime24, aime25, amc23, math_500, gpqa: diamond, and minerva. PATH_TO_OPEN_R1_EVALUATEScript should be the path to the custom evaluate script provided by OpenR1." 
+ }, + { + "type": "code", + "bbox": [ + 0.085, + 0.247, + 0.953, + 0.35 + ], + "angle": 0, + "content": "MODEL Arguments=\"pretrained=\\(MODEL_PATH, dtype=float16, data_parallel_size=2, max_model_length=32768, gpu_memory Utilization=0.5, generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}\"" + }, + { + "type": "code", + "bbox": [ + 0.085, + 0.367, + 0.613, + 0.417 + ], + "angle": 0, + "content": "```bash\nlighteval vllm $MODEL.argS \"custom|$TASK|0|0\"\n--custom-tasks $PATH_TO_OPEN_R1_EVALUATE-script\n--use-chat-template" + }, + { + "type": "footer", + "bbox": [ + 0.104, + 0.897, + 0.894, + 0.914 + ], + "angle": 0, + "content": "5https://github.com/huggingface/open-r1/blob/4f5b21e21dec473af9729bce8e084deb16223ae4/src/open_r1/Evaluate.py" + }, + { + "type": "page_number", + "bbox": [ + 0.896, + 0.949, + 0.911, + 0.958 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.388, + 0.07, + 0.611, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "title", + "bbox": [ + 0.088, + 0.106, + 0.513, + 0.124 + ], + "angle": 0, + "content": "D. Full Tina Model Performance Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.14, + 0.915, + 0.174 + ], + "angle": 0, + "content": "In this section, we present all Tina models' detailed evaluation performance during post-training across six reasoning tasks including AIME24/25, AMC23, MATH500, GPQA and Minerva." + }, + { + "type": "table", + "bbox": [ + 0.13, + 0.186, + 0.869, + 0.322 + ], + "angle": 0, + "content": "
CHECKPOINT STEPS (3740 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
50030.0013.3375.0083.6035.8632.3545.02
100036.6720.0065.0084.8032.3227.9444.46
150026.6720.0070.0083.8037.3726.8444.11
200036.6730.0077.5084.6033.3326.8448.16
250033.3330.0070.0083.0035.3527.5746.54
300030.0020.0067.5082.6030.8125.7442.78
350030.0026.6767.5082.2032.3226.1044.13
" + }, + { + "type": "table_caption", + "bbox": [ + 0.086, + 0.333, + 0.583, + 0.348 + ], + "angle": 0, + "content": "Table 7: Performance evaluation of Tina-STILL-3-1.5B-preview." + }, + { + "type": "table", + "bbox": [ + 0.13, + 0.37, + 0.869, + 0.553 + ], + "angle": 0, + "content": "
CHECKPOINT STEPS (5039 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
50030.0023.3367.5082.4039.3931.2545.65
100043.3326.6767.5086.2037.8828.6848.38
150030.0020.0080.0084.8032.8329.4146.17
200020.0026.6757.5080.6029.2924.2639.72
250013.3316.6752.5075.0031.3118.0134.47
300026.6716.6757.5078.6028.7923.1638.57
350023.3323.3362.5080.4031.8224.2640.94
400020.0020.0070.0082.0041.4127.9443.56
450023.3320.0072.5080.8034.8526.4742.99
500020.0026.6775.0080.8033.3329.4144.20
" + }, + { + "type": "table_caption", + "bbox": [ + 0.086, + 0.565, + 0.611, + 0.58 + ], + "angle": 0, + "content": "Table 8: Performance evaluation of Tina-DeepScaleR-1.5B-Preview." + }, + { + "type": "page_number", + "bbox": [ + 0.897, + 0.949, + 0.912, + 0.957 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.388, + 0.07, + 0.611, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "table", + "bbox": [ + 0.13, + 0.14, + 0.869, + 0.442 + ], + "angle": 0, + "content": "
CHECKPOINT STEPS (875 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5026.6723.3375.0084.2037.3729.0445.94
10030.0030.0065.0083.0037.3729.7845.86
15036.6716.6765.0084.8027.7827.9443.14
20020.0026.6770.0083.8033.3327.9443.62
25036.6720.0065.0084.6038.3828.3145.49
30033.3326.6770.0085.2030.8130.1546.03
35040.0016.6777.5084.4039.9027.9447.74
40030.0016.6770.0082.8035.8631.2544.43
45036.6726.6770.0085.6033.8432.7247.58
50036.6723.3382.5085.2037.3731.6249.45
55026.6716.6780.0086.0035.3529.7845.75
60030.0026.6770.0084.6037.8829.7846.49
65020.0023.3380.0085.0033.3327.9444.93
70033.3313.3372.5085.0040.4031.9946.09
75033.3323.3375.0083.6031.3127.5745.69
80030.0023.3365.0084.2038.3829.0444.99
85026.6726.6775.0083.8031.8227.9445.32
" + }, + { + "type": "table_caption", + "bbox": [ + 0.086, + 0.454, + 0.47, + 0.469 + ], + "angle": 0, + "content": "Table 9: Performance evaluation of Tina-0pen-RS3." + }, + { + "type": "table", + "bbox": [ + 0.13, + 0.546, + 0.869, + 0.849 + ], + "angle": 0, + "content": "
CHECKPOINT STEPS (875 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5033.3323.3377.5084.2038.8929.0447.72
10036.6723.3372.5084.2031.3128.6846.12
15040.0023.3372.5085.8030.3030.5147.07
20026.6723.3370.0083.8039.3929.4145.43
25046.6713.3372.5082.6031.8230.5146.24
30030.0026.6775.0084.0033.3329.0446.34
35033.3320.0075.0084.8037.3728.6846.53
40026.6716.6770.0083.2037.3727.5743.58
45043.3326.6777.5087.0036.3632.7250.60
50020.0023.3367.5084.2033.8429.4143.05
55040.0023.3372.5083.6040.9130.8848.54
60033.3320.0072.5084.2032.8330.8845.62
65033.3323.3357.5083.8034.8530.5143.89
70023.3326.6770.0082.4033.3328.6844.07
75030.0023.3372.5084.2038.8929.0446.33
80030.0026.6775.0084.4032.3229.4146.30
85026.6723.3370.0083.8035.8628.6844.72
" + }, + { + "type": "table_caption", + "bbox": [ + 0.086, + 0.861, + 0.478, + 0.876 + ], + "angle": 0, + "content": "Table 10: Performance evaluation of Tina-0pen-RS2." + }, + { + "type": "page_number", + "bbox": [ + 0.897, + 0.949, + 0.912, + 0.957 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.61, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "table", + "bbox": [ + 0.13, + 0.115, + 0.869, + 0.315 + ], + "angle": 0, + "content": "
CHECKPOINT STEPS (2327 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
40033.3320.0075.0083.8031.8229.7845.62
60030.0030.0077.5084.2034.3431.6247.94
80043.3320.0080.0084.0035.3528.6848.56
100033.3320.0082.5084.4035.8629.7847.64
120036.6720.0067.5084.4037.8830.1546.10
140030.0020.0067.5083.4031.8229.7843.75
160023.3313.3365.0083.4035.8626.8441.29
180026.6720.0075.0084.2034.3427.5744.63
200030.0026.6772.5083.0036.3627.9446.08
220030.0023.3370.0081.4030.8126.4743.67
240030.0023.3367.5081.8030.3027.5743.42
" + }, + { + "type": "table_caption", + "bbox": [ + 0.086, + 0.326, + 0.478, + 0.341 + ], + "angle": 0, + "content": "Table 11: Performance evaluation of Tina-0pen-RS1." + }, + { + "type": "table", + "bbox": [ + 0.13, + 0.368, + 0.869, + 0.453 + ], + "angle": 0, + "content": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
5020.0026.6767.5085.4037.8830.5144.66
10046.6720.0075.0083.8034.8530.5148.47
15026.6720.0072.5084.0037.3730.1545.12
20033.3330.0062.5083.4029.8030.8844.99
" + }, + { + "type": "table_caption", + "bbox": [ + 0.086, + 0.464, + 0.441, + 0.478 + ], + "angle": 0, + "content": "Table 12: Performance evaluation of Tina-LIMR." + }, + { + "type": "table", + "bbox": [ + 0.13, + 0.506, + 0.869, + 0.639 + ], + "angle": 0, + "content": "
CHECKPOINT STEPS (11716 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
50030.0020.0077.5085.2033.8430.1546.12
100030.0023.3372.5085.6033.8426.6745.32
150036.6726.6775.0086.8039.9030.5149.26
200026.6723.3367.5083.2029.8031.6243.69
250030.0023.3372.5083.8033.8426.8445.05
300020.0030.0067.5084.6034.3428.3144.13
350036.6723.3367.5083.6031.3125.7444.69
" + }, + { + "type": "table_caption", + "bbox": [ + 0.086, + 0.65, + 0.46, + 0.665 + ], + "angle": 0, + "content": "Table 13: Performance evaluation of Tina-0penR1." + }, + { + "type": "table", + "bbox": [ + 0.13, + 0.692, + 0.869, + 0.875 + ], + "angle": 0, + "content": "
CHECKPOINT STEPS (8259 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
50033.3016.6777.5084.2035.8630.1546.28
100033.3323.3380.0085.2024.7532.7246.56
150030.0023.3370.0086.0037.8829.0446.04
200030.0023.3370.0084.2033.3328.3144.86
250036.6726.6772.5084.8041.4133.0949.19
300026.6723.3375.0083.6034.3432.7245.94
350020.0016.6760.0084.2032.3226.1039.88
400033.3323.3372.5083.6038.3827.9446.51
450030.0020.0065.0085.0033.8426.8443.45
500020.0033.3365.0084.8040.9130.8845.82
" + }, + { + "type": "table_caption", + "bbox": [ + 0.086, + 0.886, + 0.516, + 0.901 + ], + "angle": 0, + "content": "Table 14: Performance evaluation of Tina-OpenThoughts." + }, + { + "type": "page_number", + "bbox": [ + 0.896, + 0.949, + 0.913, + 0.958 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.612, + 0.082 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "table", + "bbox": [ + 0.129, + 0.119, + 0.87, + 0.422 + ], + "angle": 0, + "content": "
CHECKPOINT STEPS (875 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5033.3316.6775.0083.8037.3726.8445.50
10016.6720.0070.0083.2033.3326.4741.61
15043.3323.3380.0085.0035.3530.1549.53
20030.0023.3370.0084.0039.9028.6845.99
25033.3330.0065.0083.8034.3428.3145.80
30036.6716.6767.5084.4037.8829.7845.48
35026.6730.0075.0084.0037.8829.7847.22
40036.6723.3372.5084.4032.8327.5746.22
45036.6716.6772.5085.6029.2927.5744.72
50030.0020.0072.5085.6037.3729.4145.81
55030.0023.3377.5084.8036.8731.6247.35
60033.3326.6772.5083.8030.3028.3145.82
65026.6720.0077.5082.4037.8827.9445.40
70036.6720.0080.0083.8035.3531.2547.85
75030.0026.6775.0084.2038.8927.5747.06
80020.0030.0075.0082.4035.8628.3145.26
85023.3320.0072.5085.4036.3630.1544.62
" + }, + { + "type": "table_caption", + "bbox": [ + 0.084, + 0.433, + 0.545, + 0.449 + ], + "angle": 0, + "content": "Table 15: Performance evaluation of Tina-0pen-RS3-DrGRPO." + }, + { + "type": "table", + "bbox": [ + 0.13, + 0.485, + 0.869, + 0.572 + ], + "angle": 0, + "content": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5020.0026.6767.5085.4037.8830.5144.66
10046.6720.0075.0083.8034.8530.5148.47
15026.6720.0072.5084.0037.3730.1545.12
20033.3330.0062.5083.4029.8030.8844.99
" + }, + { + "type": "table_caption", + "bbox": [ + 0.084, + 0.582, + 0.691, + 0.598 + ], + "angle": 0, + "content": "Table 16: Performance evaluation of Tina-LIMR-5e-6-1r with learning rate 5e-6." + }, + { + "type": "table", + "bbox": [ + 0.13, + 0.635, + 0.869, + 0.721 + ], + "angle": 0, + "content": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5040.0013.3372.5083.0034.3429.0445.37
10043.3316.6777.5084.6034.8530.5147.91
15030.0023.3372.5086.2037.3730.5146.65
20033.3313.3370.0083.2029.2931.2543.40
" + }, + { + "type": "table_caption", + "bbox": [ + 0.084, + 0.731, + 0.691, + 0.747 + ], + "angle": 0, + "content": "Table 17: Performance evaluation of Tina-LIMR-5e-7-1r with learning rate 5e-7." + }, + { + "type": "table", + "bbox": [ + 0.13, + 0.784, + 0.869, + 0.87 + ], + "angle": 0, + "content": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5020.0030.0077.5084.2038.3831.6246.95
10030.0023.3372.5084.6032.3229.7845.42
15036.6720.0070.0083.4031.8230.8845.46
20033.3320.0072.5085.0029.8029.4145.01
" + }, + { + "type": "table_caption", + "bbox": [ + 0.084, + 0.88, + 0.813, + 0.896 + ], + "angle": 0, + "content": "Table 18: Performance evaluation of Tina-LIMR-64-LoRA-rank with LoRA rank 64 and alpha 512." + }, + { + "type": "page_number", + "bbox": [ + 0.895, + 0.948, + 0.913, + 0.959 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.61, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "table", + "bbox": [ + 0.13, + 0.106, + 0.868, + 0.192 + ], + "angle": 0, + "content": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5033.3323.3362.5084.2038.8931.2545.58
10043.3333.3370.0083.2035.3528.3148.92
15026.6716.6772.5083.4035.3529.0443.94
20036.6720.0075.0083.0039.3930.5147.43
" + }, + { + "type": "table_caption", + "bbox": [ + 0.086, + 0.203, + 0.801, + 0.219 + ], + "angle": 0, + "content": "Table 19: Performance evaluation of Tina-LIMR-16-LoRA-rank with LoRA rank 16 and alpha 64." + }, + { + "type": "table", + "bbox": [ + 0.13, + 0.24, + 0.868, + 0.326 + ], + "angle": 0, + "content": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
5030.0026.6782.5083.8033.8430.5147.89
10026.6716.6772.5084.0036.8729.7844.42
15053.3320.0060.0083.2037.3730.8847.46
20023.3320.0072.5085.4032.8328.6843.86
" + }, + { + "type": "table_caption", + "bbox": [ + 0.086, + 0.336, + 0.782, + 0.352 + ], + "angle": 0, + "content": "Table 20: Performance evaluation of Tina-LIMR-8-LoRA-rank with LoRA rank 8 and alpha 32." + }, + { + "type": "table", + "bbox": [ + 0.13, + 0.374, + 0.868, + 0.46 + ], + "angle": 0, + "content": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
5030.0023.3365.0085.0035.3529.7844.74
10026.6726.6772.5082.8034.8529.0445.42
15036.6720.0085.0083.8031.8229.047.72
20033.3323.3377.5085.4035.8628.3147.29
" + }, + { + "type": "table_caption", + "bbox": [ + 0.086, + 0.47, + 0.782, + 0.485 + ], + "angle": 0, + "content": "Table 21: Performance evaluation of Tina-LIMR-4-LoRA-rank with LoRA rank 4 and alpha 16." + }, + { + "type": "page_number", + "bbox": [ + 0.896, + 0.948, + 0.913, + 0.959 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.612, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.106, + 0.533, + 0.127 + ], + "angle": 0, + "content": "E. Full Tina Model Training Phase Transition" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.14, + 0.918, + 0.278 + ], + "angle": 0, + "content": "In this section, we present all Tina models' training phase transitions along the training dynamics. Specifically, we observe clear phase transitions in the training of Tina-DeepScaleR-1.5B-Preview, Tina-STILL-3-1.5B-preview, Tina-Open-RS1, Tina-Open-RS2, Tina-Open-RS3, and Tina-Open-RS3-GRPO, as shown in Figures 5, 6, and 7. For Tina-OpenR1 and Tina-Thoughts (Figures 8 and 9), the observation is similar, except the best-performing checkpoint is achieved after the training turning point, rather than before. However, we do not observe such a transition in all Tina variants on the LIMR dataset, as shown in Figures 10, 11, and 12, possibly because its small data size leads to training periods which are too brief to extract meaningful information." 
+ }, + { + "type": "image", + "bbox": [ + 0.125, + 0.291, + 0.496, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.291, + 0.872, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.459, + 0.495, + 0.625 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.459, + 0.872, + 0.625 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.126, + 0.628, + 0.495, + 0.793 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.628, + 0.871, + 0.793 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.806, + 0.916, + 0.839 + ], + "angle": 0, + "content": "Figure 5: Phase transition in Tina-DeepScaleR-1.5B-Preview and Tina-STILL-3-1.5B-Preview. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." + }, + { + "type": "page_number", + "bbox": [ + 0.895, + 0.948, + 0.913, + 0.959 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.612, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.236, + 0.493, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.236, + 0.871, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.126, + 0.404, + 0.493, + 0.568 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.404, + 0.871, + 0.568 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.127, + 0.572, + 0.492, + 0.737 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.572, + 0.871, + 0.737 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, 
+ 0.75, + 0.916, + 0.784 + ], + "angle": 0, + "content": "Figure 6: Phase transition in Tina-0pen-RS1 and Tina-0pen-RS2. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." + }, + { + "type": "page_number", + "bbox": [ + 0.895, + 0.948, + 0.913, + 0.959 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.612, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.236, + 0.495, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.236, + 0.871, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.126, + 0.404, + 0.495, + 0.568 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.404, + 0.871, + 0.568 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.126, + 0.572, + 0.495, + 0.737 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.572, + 0.871, + 0.737 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.75, + 0.916, + 0.784 + ], + "angle": 0, + "content": "Figure 7: Phase transition in Tina-Open-RS3 and Tina-Open-RS3-GRPO. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.895, + 0.948, + 0.913, + 0.959 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.612, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "image", + "bbox": [ + 0.128, + 0.151, + 0.495, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.151, + 0.871, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.128, + 0.32, + 0.495, + 0.484 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.32, + 0.871, + 0.484 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.128, + 0.489, + 0.495, + 0.652 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.489, + 0.871, + 0.652 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.128, + 0.657, + 0.495, + 0.821 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.657, + 0.871, + 0.821 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.835, + 0.916, + 0.866 + ], + "angle": 0, + "content": "Figure 8: Phase transition in Tina-0penR1. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.895, + 0.948, + 0.913, + 0.959 + ], + "angle": 0, + "content": "30" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.612, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "image", + "bbox": [ + 0.128, + 0.151, + 0.495, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.151, + 0.871, + 0.315 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.126, + 0.32, + 0.495, + 0.484 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.32, + 0.871, + 0.484 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.126, + 0.489, + 0.495, + 0.653 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.489, + 0.871, + 0.653 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.126, + 0.657, + 0.495, + 0.821 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.657, + 0.871, + 0.821 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.835, + 0.916, + 0.866 + ], + "angle": 0, + "content": "Figure 9: Phase transition in Tina-OpenThoughts. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.895, + 0.948, + 0.911, + 0.959 + ], + "angle": 0, + "content": "31" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.612, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "image", + "bbox": [ + 0.099, + 0.3, + 0.361, + 0.417 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.367, + 0.3, + 0.63, + 0.417 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.636, + 0.3, + 0.9, + 0.418 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.099, + 0.42, + 0.362, + 0.538 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.367, + 0.42, + 0.63, + 0.538 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.636, + 0.42, + 0.9, + 0.538 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.098, + 0.54, + 0.362, + 0.659 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.367, + 0.54, + 0.63, + 0.659 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.636, + 0.54, + 0.9, + 0.659 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.671, + 0.916, + 0.715 + ], + "angle": 0, + "content": "Figure 10: Phase transition in Tina-LIMR, Tina-LIMR-64-LoRA-rank and Tina-LIMR-16-LoRA-rank. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.895, + 0.948, + 0.913, + 0.959 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.612, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "image", + "bbox": [ + 0.128, + 0.236, + 0.495, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.236, + 0.871, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.126, + 0.404, + 0.495, + 0.568 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.404, + 0.871, + 0.568 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.127, + 0.572, + 0.495, + 0.737 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.572, + 0.871, + 0.737 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.75, + 0.916, + 0.784 + ], + "angle": 0, + "content": "Figure 11: Phase transition in Tina-LIMR-8-LoRA-rank and Tina-LIMR-4-LoRA-rank. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.895, + 0.948, + 0.913, + 0.959 + ], + "angle": 0, + "content": "33" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.386, + 0.069, + 0.612, + 0.081 + ], + "angle": 0, + "content": "Tina: Tiny Reasoning Models via LoRA" + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.235, + 0.496, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.236, + 0.871, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.126, + 0.404, + 0.495, + 0.568 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.404, + 0.871, + 0.568 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.126, + 0.572, + 0.495, + 0.737 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.572, + 0.871, + 0.737 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.75, + 0.916, + 0.784 + ], + "angle": 0, + "content": "Figure 12: Phase transition in Tina-LIMR-5e-6-1r and Tina-LIMR-5e-7-1r. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.895, + 0.948, + 0.913, + 0.959 + ], + "angle": 0, + "content": "34" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15777/7d52c4ec-83bf-4780-930e-43bf666b3c1c_origin.pdf b/data/2025/2504_15xxx/2504.15777/7d52c4ec-83bf-4780-930e-43bf666b3c1c_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..80f6856e4628db80ef3c592d128a5b315b389a17 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/7d52c4ec-83bf-4780-930e-43bf666b3c1c_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13bcb9b7828b35529fbdabe2e8faf733814c9cf0ba8f8f80aa3d323f09cc6655 +size 6035521 diff --git a/data/2025/2504_15xxx/2504.15777/full.md b/data/2025/2504_15xxx/2504.15777/full.md new file mode 100644 index 0000000000000000000000000000000000000000..8ce1de52830a23a657ebc68a3fbdd3057ee1cf86 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/full.md @@ -0,0 +1,499 @@ +# Tina: Tiny Reasoning Models via LoRA + +Shangshang Wang1, Julian Asilis1, Ömer Faruk Akgül1, Enes Burak Bilgin1, Ollie Liu1, and Willie Neiswanger1 + +1University of Southern California + +How cost-effectively can strong reasoning abilities be achieved in language models? Driven by this fundamental question, we present Tina, a family of tiny reasoning models achieved with high cost-efficiency. Notably, Tina demonstrates that substantial reasoning performance can be developed using only minimal resources, by applying parameter-efficient updates during reinforcement learning (RL), using low-rank adaptation (LoRA), to an already tiny 1.5B parameter base model. This minimalist approach produces models that achieve reasoning performance which is competitive with, and sometimes surpasses, SOTA RL reasoning models built upon the same base model. Crucially, this is achieved at a tiny fraction of the computational post-training cost employed by existing SOTA models. 
In fact, the best Tina model achieves a $>20\%$ reasoning performance increase and $43.33\%$ Pass@1 accuracy on AIME24, at only $9 USD post-training and evaluation cost (i.e., an estimated 260x cost reduction). Our work reveals the surprising effectiveness of efficient RL reasoning via LoRA. We validate this across multiple open-source reasoning datasets and various ablation settings starting with a single, fixed set of hyperparameters. Furthermore, we hypothesize that this effectiveness and efficiency stem from LoRA rapidly adapting the model to the structural format of reasoning rewarded by RL, while largely preserving the base model's underlying knowledge. In service of accessibility and open research, we fully open-source all code, training logs, and model weights & checkpoints. + +Notion Blog: https://shangshangwang.notion.site/tina +Code Repository: https://github.com/shangshang-wang/Tina +Training Logs: https://wandb.ai/upup-ashton-wang-usc/Tina +Model Weights & Checkpoints: https://huggingface.co/Tina-Yi + +# 1. Introduction + +Language models (LMs) demonstrate increasing proficiency across a variety of tasks, but achieving robust, multi-step reasoning remains a frontier challenge (Wang and Neiswanger, 2025, Xu et al., 2025). Notably, such reasoning abilities are crucial for applications demanding complex problem-solving, from scientific discovery to intricate planning. Enhancing complex reasoning via supervised fine-tuning (SFT) is a well-adopted technique, often utilizing a distillation process (Min et al., 2024, Huang et al., 2024) by which the model learns to mimic reasoning traces (e.g., step-by-step thinking) generated by more advanced models such as o1 (OpenAI, 2024). This approach, while effective, relies upon the quality and availability of such expert demonstrations, which can be costly to obtain. 
Furthermore, it can run the risk of instilling a shallow form of imitation in the learning model, rather than fostering dynamic exploration of reasoning paths. In contrast, reinforcement learning (RL) enables models to learn directly and flexibly from verifiable reward signals derived from curated data (DeepSeek-AI, 2025, Lambert et al., 2025). In doing so, RL can lead the model to explore a greater variety of logical paths and possibly discover more robust solutions. However, RL pipelines are often complex and notoriously resource-intensive, typically involving substantial compute. This raises a fundamental question anchoring our research: + +How cost-effectively can one perform RL to efficiently instill reasoning abilities in LMs? + +![](images/c58552ebaf2daf6eabfa675f030cd3a157abeb8ab14396bafc107c507f8547bd.jpg) +Figure 1: Overall comparison between Tina and baseline models. The Tina model in the figure corresponds to the best checkpoint in Table 10. Reasoning performance denotes the average score across AIME24/25, AMC23, MATH500, GPQA, and Minerva, as described in Section 3. The calculation of each comparative metric is detailed in Appendix A. + +![](images/bf53269452a3162272dd632626aa50af3f1678181b6f9d0ba4c95990a34126d1.jpg) + +![](images/e7251e5046f2a5f6cb05b876b7fc39023bdfb9a1ab1a6d709a9ce612c7eaab5a.jpg) + +![](images/6c6d44847f89dd7f528dfee2e033953d575eac2bf14e954d1c4d9a807378e5d6.jpg) + +Our pursuit of this question necessitates a deliberate move towards minimalism. Rather than utilizing models with tens of billions of parameters (such as Qwen-7B/32B, QwQ-32B-preview, and their variants (Min et al., 2024, NovaSky Team, 2025, Zeng et al., 2025, Muennighoff et al., 2025, Cui et al., 2025, Lyu et al., 2025, OpenThoughts Team, 2025, Hu et al., 2025)), we instead direct our attention to tiny models. In particular, we use the 1.5B parameter model, DeepSeek-R1-Distill-Qwen-1.5B (DeepSeek-AI, 2025). 
Our choice of this base model aligns with common practices in recent research (RUCAIBox STILL Team, 2025, Luo et al., 2025, Dang and Ngo, 2025): we begin with a foundation that, owing to its specific lineage (DeepSeek/Qwen) and distillation process, likely possesses stronger initial reasoning aptitude compared to a generic pre-trained model of equivalent size. This strategic starting point allows us to more-rigorously evaluate the incremental reasoning enhancements imparted by RL, thereby isolating and measuring the effectiveness of the technique itself over a competent baseline. More importantly, selecting such an architecture dramatically lowers the computational and financial threshold for experimentation. Complementing the choice of a compact base model, we further amplify efficiency during the RL phase and integrate parameter-efficient post-training by employing low-rank adaptation (LoRA) (Hu et al., 2021). Notably, LoRA enables the modification of a model's behavior by training only an exceptionally small number of new parameters. This dovetails with our central motivation: achieving reasoning capabilities through the most economical means possible. + +Integrating the previous two components—a “tiny” model architecture and a “tiny” post-training via LoRA-based RL—we release the Tina (Tiny Reasoning Models via LoRA) family of models, which attain substantial reasoning performance at strikingly low cost. In total, we summarize our contributions as follows: + +- Surprising Effectiveness of Efficient RL Reasoning. We show that our Tina models achieve performance competitive with, and in some cases even superior to, SOTA baseline models built on the same base model with full-parameter training, as shown in Figure 1 and in more detail in Table 3. In particular, the best Tina model achieves a $>20\%$ performance increase and $43.33\%$ Pass@1 accuracy on AIME24. +- Rapid Reasoning Format Adaptation Hypothesis. 
Based on our observations in post-training Tina, we hypothesize that LoRA's effectiveness and efficiency stem from rapidly adapting the reasoning format under RL while preserving base model knowledge—a likely more compute-efficient process than the deep knowledge integration of full-parameter training. Partial support comes from studies showing tiny LMs can reason effectively (Hugging Face, 2025, DeepSeek-AI, 2025), while large LMs can store broader world knowledge (Allen-Zhu and Li, 2025). This distinction suggests reasoning capabilities can be significantly enhanced by focusing on adapting the output format itself, consistent with our hypothesis about LoRA. To test this, we exclusively train LoRA parameters in RL settings, focusing on leveraging this format adaptation mechanism. + +![](images/f7e24a2670d10d97ac1743803388f0f914b52e3adf5a7d8bb766e74bbc93cbba.jpg) +Figure 2: Release timeline of open-source models that aim to replicate the performance of advanced reasoning models like o1(-preview) (OpenAI, 2024) and R1 (DeepSeek-AI, 2025), which we refer to as open-source reasoning replicas. + +- Democratizing RL Reasoning. We provide a reproducible and highly cost-effective approach, enabling wider participation in the exploration of RL techniques without requiring extensive computational resources. Notably, the cost of reproducing the best Tina checkpoint stands at only $9, and of reproducing all our experiments and everything presented in this paper from scratch at$ 526. Furthermore, in line with our goal of promoting accessible research, we release all code, training logs, evaluation scripts, and all Tina checkpoints. + +# 2. Related Work + +# 2.1. Open-Source Reasoning Replicas + +As shown in Figure 2, following the release of o1-preview (OpenAI, 2024), a number of open-source models have emerged to replicate or exceed its reasoning capabilities. 
STILL (Min et al., 2024) introduced a minimal yet high-quality training recipe designed to elicit reasoning with modest compute, demonstrating that imitation learning from curated traces remains competitive. Sky-T1 (NovaSky Team, 2025) further explored scaling using open instruction-tuned checkpoints, while SimpleRL (Zeng et al., 2025) highlighted the potential of lightweight RL without requiring large-scale reward models. PRIME (Cui et al., 2025) and DeepScaleR (Luo et al., 2025) introduced process supervision and scaling experiments to isolate how reasoning quality evolves with model size and context length. s1 (Muennighoff et al., 2025) showed that even strong base models such as Qwen2.5-32B-Instruct benefit from fine-tuning on only 1k high-quality and long chain-of-thought data, which is curated to elicit reasoning capabilities. L1 (Aggarwal and Welleck, 2025) combined prompt engineering with data curation for RL, resulting in models that can efficiently and adaptively control their response length. Meanwhile, OREAL (Lyu et al., 2025) and OpenThinker (OpenThoughts Team, 2025) investigated self-correction and latent structure emergence through unsupervised and hybrid paradigms. The release of Open Reasoner Zero (Hu et al., 2025) and Open-RS (Dang and Ngo, 2025) further emphasized efficient RL-based strategies for reasoning with small models, completing a landscape of public alternatives for interpretability and reproducibility. + +# 2.2. RL with Verifiable Rewards + +Reasoning tasks are well-suited to RL paradigms, as the correctness or quality of the final output often provides verifiable reward signals (e.g., the validity of a logical deduction). Such signal can effectively guide the model towards learning more robust reasoning strategies. Consequently, various RL approaches have been explored within this domain. 
Certain methods introduce auxiliary reward models or critics to assess reasoning quality, such as ReFT (Luong et al., 2024) and REFINER (Paul et al., 2024). Other techniques employ explicit rule-based verification for self-correction (Wu et al., 2024). Some leverage self-play dynamics and exploration, such as mutual reasoning (Qi et al., 2024), or utilize inference-aware fine-tuning that optimizes performance under different sampling strategies (Chow et al., 2024). Notably, Group Relative Policy Optimization (GRPO) has been proposed as a variant of Proximal Policy Optimization (PPO) which removes the need for a separate value network by using a group-based baseline for advantage estimation, improving training efficiency and leading to better reward alignment (Shao et al., 2024), as demonstrated by DeepSeek-R1 (DeepSeek-AI, 2025). Subsequently, Dr.GRPO (Liu et al., 2025) introduced a subtle modification of GRPO addressing its bias to produce long responses. For completeness, we provide the standard formulation of GRPO in Appendix B. + +# 2.3. Low-Rank Adaptation + +While most existing open models that enable reasoning rely on the more expensive full-parameter training (Min et al., 2024, NovaSky Team, 2025, Zeng et al., 2025, Muennighoff et al., 2025, Aggarwal and Welleck, 2025, Cui et al., 2025, Luo et al., 2025, Lyu et al., 2025, OpenThoughts Team, 2025, Hu et al., 2025, Dang and Ngo, 2025), we investigate the use of LoRA for parameter-efficient post-training of reasoning models (Hu et al., 2021). Our goal is to assess whether updating only a small fraction of parameters can still yield strong reasoning capabilities (Han et al., 2024). In addition to its computational efficiency, LoRA provides modularity: by training only a low-rank decomposition of the parameter updates, it becomes possible to toggle reasoning behavior without maintaining multiple full model copies. For completeness, we provide the standard formulation of LoRA in Appendix B. + +# 3. 
Tina: Tiny Reasoning Models via LoRA + +Tina is our family of models created by post-training the DeepSeek-R1-Distill-Qwen-1.5B base model using LoRA during RL (employing a GRPO-style algorithm). The "Tiny" designation encapsulates a deliberate focus on minimalism and efficiency across the entire framework. This encompasses not only the tiny base model architecture and the tiny parameter updates enabled by LoRA, but also extends to a tiny overall resource footprint. This minimized footprint is achieved through an efficient training pipeline leveraging accessible open-source datasets and codebase (detailed in Section 3.1), and requires only minimal hardware and budget resources (described in Section 3.2). + +# 3.1. Training Pipeline: Baselines & Datasets + +To facilitate meaningful comparisons and enable precise ablations, we post-train our Tina models via RL using the datasets and setups from publicly available reasoning models. All Tina and baseline models adopt DeepSeek-R1-Distill-Qwen-1.5B as their base model checkpoint with default open-source weights. + +- STILL-3-1.5B-preview (RUCAIBox STILL Team, 2025) is a slow-thinking reasoning model developed through iterative RL on a curated dataset of $33\mathrm{k}$ reasoning traces. The data originates from mathematics competitions and includes problems from MATH (Hendrycks et al., 2021, Lightman et al., + +2023), NuminaMathCoT (LI et al., 2024), and AIME (1983-2023) (Art of Problem Solving, 2024). Tina-STILL-3-1.5B-preview uses the same dataset and reward pipeline. + +- DeepScaleR-1.5B-Preview (Luo et al., 2025) focuses on long-context mathematical reasoning via RL, and is trained over approximately 40k problem-answer pairs drawn from the AIME (Art of Problem Solving, 2024), AMC (Art of Problem Solving, 2023), OMNI-MATH (Gao et al., 2024a), and STILL (RUCAIBox STILL Team, 2025) datasets. Tina-DeepScaleR-1.5B-Preview uses this dataset and mirrors the reward design. 
+- Open-RS1/2/3 (Dang and Ngo, 2025) are three models from the Open-RS project exploring reasoning performance in 1.5B models trained via RL. All Open-RS models are trained on small, high-quality datasets further curated from the s1 (Muennighoff et al., 2025) (i.e., Open-S1) and DeepScaleR (Luo et al., 2025) (i.e., Open-DeepScaleR) datasets. The Tina models (Tina-Open-RS1/2/3) replicate these setups, using identical data splits and reward scaffolding. + +# 3.2. Training Setup: Infrastructure & Budget + +Training Codebase. Our implementation builds upon OpenR1, a fully open reproduction of DeepSeek-R1 (DeepSeek-AI, 2025) which combines the Accelerate (Gugger et al., 2022) and Trl (von Werra et al., 2020) libraries and the DeepSpeed ZeRO optimization (Rajbhandari et al., 2019). It aims to transparently replicate and extend RL methods used for improving reasoning in language models, particularly focusing on aligning model behavior with reasoning-oriented objectives via verifiable reward signals. Our methodology inherits its scaffolding, training utilities, and reward interfaces. + +Training Hyperparameters. We initiated parameter selection by replicating key parameters from OpenR1 (Hugging Face, 2025) and OpenRS (Dang and Ngo, 2025). For all experiments presented in this paper, we deliberately adopted the default or recommended hyperparameter configurations provided in their works. These settings were kept largely fixed across different runs (Table 5). For the main Tina results (Section 4.2), only reward function parameters were adjusted per task, and for ablation studies (Section 4.3), only the specific factor under investigation (e.g., learning rate, LoRA rank/alpha, RL algorithm) was varied (Table 6). This approach intentionally circumvents costly hyperparameter search procedures for our specific setup, ensuring negligible tuning overhead and focusing on the efficacy of the core LoRA-based RL methodology. + +Training Hardware. 
A key element of our low-cost approach was minimizing the hardware footprint. While distributed RL training algorithms like GRPO often benefit from using three or more GPUs (e.g., dedicating one GPU to an inference engine such as vLLM for faster sample generation), we deliberately targeted a minimal setup using only two NVIDIA L40S GPUs. To enable this, we co-located the RL training process and the vLLM on the same two GPUs by constraining vLLM's GPU memory usage. The training itself utilized data parallelism across both GPUs. While running inference and training concurrently on two GPUs might result in a longer wall-clock training time compared to a setup with dedicated inference GPUs, it significantly reduces the hardware requirement. + +Training Budget. The NVIDIA L40S GPUs we use are accessible via commercial cloud platforms at an approximate rate of \(1 USD per GPU hour, including 300 GB storage, based on pricing observed at the time of writing (Cudo Compute). The RL training process for our LoRA models proved highly efficient, with a + +
EXPERIMENTAL TASKTRAINING COST EST.EVALUATION COST EST.TOTAL COST EST.
Baseline: Model Re-Evaluation-$6$6
Main: Tina-STILL-3-1.5B-preview$59$7$66
Main: Tina-DeepScaleR-1.5B-Preview$84$10$94
Main: Tina-Open-RS1$40$11$51
Main: Tina-Open-RS2$15$17$32
Main: Tina-Open-RS3$15$17$32
Ablation: OpenThoughts Dataset$84$10$94
Ablation: OpenR1 Dataset$59$7$66
Ablation: LIMR Dataset$4$4$8
Ablation: DrGRPO Algorithm$15$17$32
Ablation: Learning Rate$7$8$15
Ablation: LoRA Rank/Alpha$14$16$30
Total: All Tasks$396$130$526
Total: Main Tasks$213$62$275
Total: Best Ckpt. in Each Main Task$80$5$85
Total: All Ckpt. in Best-Performance Task$14$17$31
Total: Best Ckpt. in Best-Performance Task$8$1$9
+ +Table 1: Computational cost breakdown. Costs for all experimental tasks in this paper, measured in USD. The row "Best Ckpt. in Each Main Task" denotes the cost of reproducing the best checkpoint in each of Table 7, 8, 9, 10, 11. The row "All Ckpt. in Best-Performance Task" denotes the cost of reproducing all checkpoints in Table 10. "Best Ckpt. in Best-Performance Task" denotes the cost of reproducing the best checkpoint in Table 10, i.e., the checkpoint at step 450. + +single RL step typically completing within one minute on this hardware. Evaluating a model checkpoint across our entire suite of six reasoning benchmarks required approximately 1 L40S GPU hours on average. To ensure cost control, we initially established a conservative maximum budget of \(100 USD for each complete experimental run, encompassing all stages from training to evaluation and miscellaneous tasks. As detailed in Table 1, our actual expenditures were significantly below this ceiling. Our calculation is based on the full Tina model evaluation performance in Appendix D. We believe this low cost makes our setup an accessible testbed for the research community. + +# 4. Surprising Effectiveness of Efficient RL Reasoning via LoRA + +# 4.1. Experiments Stage I: Baseline Model Re-Evaluation + +Before presenting Tina's performance, it is crucial to establish fair and reliable comparisons against existing SOTA reasoning models. We note that performance scores reported in the literature for relevant models often stem from evaluations using disparate frameworks (e.g., verl (Sheng et al., 2025), lighteval (Fourrier et al., 2023), lm-eval-harness (Gao et al., 2024b)) and inconsistent inference settings (such as different generation hyperparameters or varying numbers of GPUs). These variations can significantly influence reported metrics, creating potential inconsistencies and hindering reliable comparisons between models. 
+ +To mitigate these confounding factors, we performed a comprehensive re-evaluation of key baseline models using a single, consistent methodology throughout this paper. All baseline evaluations reported herein utilize the lighteval framework integrated with the vLLM (Kwon et al., 2023) inference engine for efficient + +
BASELINE MODELAIME24AIME25AMC23MATH500GPQAMinervaAvg.
DeepSeek-R1-Distill-Qwen-1.5B23.3316.6762.5082.6031.8230.1541.18
STILL-3-1.5B-preview26.6726.6767.5086.4034.3427.5744.86
DeepScaleR-1.5B-Preview36.6726.6777.5087.8031.8231.9948.74
Open-RS126.6720.0072.5083.6035.3528.6844.47
Open-RS226.6713.3362.5085.4034.8526.8441.60
Open-RS343.3320.0067.5083.0033.8428.6846.06
+ +Table 2: Baseline model re-evaluation. Performance evaluation of baseline models on six reasoning tasks. + +generation. For comparability with prior work such as OpenR1, we maintained a fixed hardware configuration (two L40S GPUs) and applied a standardized set of vLLM inference parameters across all evaluated baseline models. All scores are zero-shot pass@1 performance. The exact command structure employed for these evaluations is provided in Appendix C.2 for transparency and reproducibility. The results stemming from this consistent re-evaluation protocol are presented in Table 2. + +Particularly, we evaluate the reasoning capabilities of our Tina models and the baselines across a diverse suite of six challenging benchmarks, primarily focused on mathematical and scientific reasoning: + +- AIME24/25 (Art of Problem Solving, 2024) contains 30 high-school-level math problems in algebra, geometry, number theory, and combinatorics from the 2024/2025 American Invitational Mathematics Examination. Each problem demands precise multi-step reasoning. +- AMC23 (Art of Problem Solving, 2023) includes 40 problems from the 2023 American Mathematics Competition, offering a mix of logic and symbolic manipulation tasks. +- MATH500 (Hendrycks et al., 2021, Lightman et al., 2023) is a benchmark comprising 500 competition mathematics problems derived from various sources, covering different difficulty levels and often necessitating multi-step derivation and calculation. +- GPQA Diamond (Rein et al., 2024), hereafter referred to as GPQA, consists of 198 PhD-level science questions across biology, chemistry, and physics. Each question is multiple-choice with subtle distractors. +- Minerva (Lewkowycz et al., 2022) includes 272 quantitative reasoning problems generally at the undergraduate level. The questions span multiple STEM fields, including physics, biology, chemistry, and economics, often requiring mathematical modeling or calculation steps. 
Includes tasks such as calculating enzyme kinetics from reaction data. + +# 4.2. Experiments Stage II: Tina Model Evaluation + +We now present the core evaluation results for our Tina models. These experiments assess the reasoning capabilities attained by post-training the DeepSeek-R1-Distill-Qwen-1.5B with minimal parameter updates via LoRA-based RL. The results presented in Table 3 demonstrate that significant reasoning performance can be achieved efficiently, yielding models that are competitive with, or outperform, relevant baselines despite the inherent resource constraints of using parameter-efficient tuning.3 + +Table 3 summarizes the performance of five distinct Tina models across a suite of six reasoning tasks: + +
TINA MODELSTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMinervaAvg.BASELINE
Tina-STILL-3-1.5B-preview53%36.6730.0077.5084.6033.3326.8448.1644.86
Tina-DeepScaleR-1.5B-Preview19%43.3326.6767.5086.2037.8828.6848.3848.74
Tina-Open-RS134%43.3320.0080.0084.0035.3528.6848.5644.47
Tina-Open-RS251%43.3326.6777.5087.0036.3632.7250.6041.60
Tina-Open-RS357%36.6723.3382.5085.2037.3731.6249.4546.06
+ +Table 3: Tina model evaluation. Performance comparison between Tina models and corresponding full-parameter-trained SOTA models on six reasoning tasks. The value in the Steps column indicates the training steps of the best model checkpoint within one epoch, the full model checkpoint evaluation is shown in Appendix D. The Baseline column represents the average score achieved by baseline model with full-parameter RL in Table 2. + +AIME24/25, AMC23, MATH500, GPQA, and Minerva. For each Tina model, we report the extent of training completed (as a percentage of a predefined training stpes within 1 epoch) and the percentage scores achieved on each task. The results compellingly demonstrate the efficacy of our economical LoRA-based RL strategy. All Tina models exhibit substantial reasoning aptitude, achieving average scores in the range of $48.16\%$ to $50.60\%$ . Significantly, nearly all Tina models notably outperform their corresponding baseline average scores, indicating marked improvements instilled by the parameter-efficient RL. The Tina-Open-RS2 model yielded the highest average performance observed at $50.60\%$ . Furthermore, these strong results were achieved with remarkably limited training durations, ranging from just $19\%$ to $57\%$ of a full training epoch, highlighting the efficiency and rapid adaptation enabled by the Tina approach. These findings strongly support our central hypothesis: robust reasoning capabilities can be effectively and economically cultivated in small language models through the targeted application of LoRA and RL. + +# 4.3. Experiments Stage III: Tina Ablation Variants + +To better understand the factors influencing the performance and efficiency of our Tina models within the proposed low-cost framework, we conducted a series of ablation studies. 
These studies systematically investigate the impact of key design choices and hyperparameters: the underlying training dataset, the learning rate for LoRA updates, the rank of the LoRA adapters, and the specific RL algorithm employed. In each study, we typically varied one factor while holding others constant, often based on a high-performing configuration identified in our main experiments or preliminary runs. The results, summarized in Table 4, provide valuable insights into the robustness and sensitivity of our economical approach. + +Impact of Training Dataset. The first section of Table 4 highlights the influence of the dataset used for RL. We compared seven distinct datasets, varying significantly in size (from $\approx 1.4\mathrm{k}$ to $\approx 94\mathrm{k}$ samples). Strikingly, the Tina-Open-RS model, trained on a concise dataset of merely 7k examples, achieved the highest average score (50.60%). This outcome surpasses models trained on considerably larger datasets, such as Tina-OpenR1 (93.7k samples, 49.26% avg). This observation strongly supports our core "Tiny" premise and reflects the intuition that the quality and diversity of the dataset matter more than the data size. + +Sensitivity to Learning Rate. Using the Tina-LIMR configuration as a testbed (second section of Table 4), we assessed sensitivity to the learning rate. Among the tested values ($5 \times 10^{-6}$, $1 \times 10^{-6}$, and $5 \times 10^{-7}$), a learning rate of $1 \times 10^{-6}$ yielded the optimal average performance $(48.47\%)$ for this setup. While performance differences were not drastic, this indicates that learning rate selection remains a factor, although effective results were obtained without extensive tuning. + +Effect of LoRA Rank. The third ablation study investigated the impact of LoRA rank, which directly controls the number of trainable parameters. Testing ranks 4, 8, 16, 32, and 64 on the Tina-LIMR setup, we observed + +
ABLATION ON DATASETSSTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
Tina-OpenR1 (93.7k)13%36.6726.6775.0086.8039.9030.5149.26
Tina-OpenThoughts (66.1k)30%36.6726.6772.5084.8041.4133.0949.19
Tina-DeepScaleR (40.3k)19%43.3326.6767.5086.2037.8828.6848.38
Tina-STILL-3 (33k)53%36.6730.0077.5084.6033.3326.8448.16
Tina-Open-S1 (18.6k)34%43.3320.0080.0084.0035.3528.6848.56
Tina-Open-RS (7k)51%43.3326.6777.5087.0036.3632.7250.60
Tina-LIMR (1.39k)58%46.6720.0075.0083.8034.8530.5148.47
ABLATION ON LEARNING RATESTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
Tina-LIMR-5e-6-lr29%36.6726.6775.0083.6035.8629.4147.87
Tina-LIMR-1e-6-lr58%46.6720.0075.0083.8034.8530.5148.47
Tina-LIMR-5e-7-lr58%43.3316.6777.5084.6034.8530.5147.91
ABLATION ON LORA RANKSTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
Tina-LIMR-64-LoRA-rank29%20.0030.0077.5084.2038.3831.6246.95
Tina-LIMR-32-LoRA-rank58%46.6720.0075.0083.8034.8530.5148.47
Tina-LIMR-16-LoRA-rank58%43.3333.3370.0083.2035.3528.3148.92
Tina-LIMR-8-LoRA-rank29%30.0026.6782.5083.8033.8430.5147.89
Tina-LIMR-4-LoRA-rank86%36.6720.0085.0083.8031.8229.0447.72
ABLATION ON RL ALGORITHMSTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
Tina-Open-RS3-GRPO57%36.6723.3382.5085.2037.3731.6249.45
Tina-Open-RS3-DrGRPO17%43.3323.3380.0085.0035.3530.1549.53
+ +Table 4: Tina ablation variants evaluation. Performance evaluation of Tina's ablation variants on six reasoning tasks. The value in the Steps column indicates the training steps of the best model checkpoint within one epoch, the full model checkpoint evaluation is shown in Appendix D. For the number in parentheses (the ablation on datasets), it means the data size of a dataset. During training, this number should be multiplied by the number of generations in GRPO-like algorithms (in our case, that multiplier is 4). For the model names, Tina-LIMR, Tina-LIMR-1e-6-lr and Tina-LIMR-32-LoRA-rank are the same model; we duplicate them for better visualization. The same idea applies to Tina-DeepScaleR and Tina-DeepScaleR-1.5B-Preview, Tina-STILL-3 and Tina-STILL-3-1.5B-preview, Tina-Open-S1 and Tina-Open-RS1, Tina-Open-RS and Tina-Open-RS2, Tina-Open-RS3-GRPO and Tina-Open-RS3. + +considerable robustness. Ranks 8, 16, and 32 all produced strong results, with average scores clustering between $47.89\%$ and $48.92\%$ . Notably, rank 16 achieved the peak performance $(48.92\%)$ in this comparison, slightly outperforming rank 32 $(48.47\%)$ . Performance decreased slightly at the extremes (rank 4 and 64). This study validates that highly parameter-efficient configurations (low ranks like 16 or 32) are effective, further enhancing the cost-effectiveness and minimal overhead of the Tina approach. + +Comparison of RL Algorithms. Finally, we compared two RL algorithms, GRPO and Dr.GRPO (Liu et al., 2025), using the Tina-Open-RS3 setup (final section of Table 4). Both algorithms led to similar peak average performance levels (49.45% for GRPO vs. 49.53% for Dr.GRPO). However, Dr.GRPO reached its best checkpoint significantly earlier in the training process (17% of an epoch vs. 57% for GRPO). 
This suggests potential advantages in sample efficiency for Dr.GRPO in this context with an alternative normalization in loss calculation, offering potentially faster convergence and further reductions in training time and cost. + +# 5. Hypothesis for Effective and Efficient LoRA: Rapid Format Adaptation + +Less is More LoRA-based RL. To understand why LoRA facilitates both effective and efficient reasoning improvements via RL, we analyze the relationship between training compute and performance, alongside training dynamics. As illustrated in Figure 3, plotting reasoning performance against approximate training FLOPs reveals a stark contrast between full-parameter and LoRA-based training regimes. First, our LoRA-based Tina models achieve reasoning scores comparable or superior to fully fine-tuned baselines while requiring (in some cases) orders of magnitude fewer training FLOPs. We observe that in LoRA models, increased training compute inversely affects performance, in contrast to full-parameter models. This observation highlights a "less compute can yield more performance" phenomenon. + +![](images/1a3b1780e0c1635d42f9fb927eb80189e6d0502e9d868b337a308db1217364ff.jpg) +Figure 3: Less is more LoRA-based RL. Approximate training FLOPs vs reasoning performance comparison between Tina and baseline models. The calculation is detailed in Appendix A. + +This finding supports our hypothesis regarding how LoRA achieves such remarkable efficiency, which relates to the principle of "learn structure/format, maintain knowledge." We posit that LoRA excels in this scenario because RL for reasoning heavily rewards the model's ability to generate outputs in a specific, verifiable format or structure (e.g., step-by-step reasoning chains). LoRA appears to be highly adept at learning these structural and stylistic patterns with minimal parameter changes, thus requiring very few FLOPs. 
At the same time, because LoRA modifies only a tiny fraction of the weights, it largely preserves the base model's vast pre-trained knowledge. Therefore, LoRA efficiently teaches the model how to format its existing knowledge into effective reasoning traces, rather than potentially imposing costly relearning of concepts or procedures that extensive full-parameter updates might entail. We hypothesize that this focus on structural adaptation allows Tina to achieve high reasoning performance with minimal computational investment. + +Phase Transition in LoRA-based RL. Further insights into the LoRA-based RL mechanism arise from analyzing the training logs. That is, a distinct pattern emerges in Figure 4, which displays accuracy rewards, format rewards, and completion lengths over training steps for various Tina model runs. We consistently observe a training phase transition or turning point evident in the format-related metrics (format reward, row 2; completion length, row 3) across most Tina models. Around this transition point (indicated by the green vertical dashed line), the format reward often peaks or destabilizes, while the completion length frequently reaches a minimum before potentially reversing its trend. Notably, this relatively sharp transition observed in format and length metrics does not typically have a corresponding distinct turning point in the accuracy reward plots (row 1). The accuracy reward often exhibits more gradual fluctuations or slower drift over the + +training duration, without a clear inflection aligned with the format transition. 
+ +![](images/d40c018fc96b24189e82fe7ab40c407172146c7f87651959576d69ef64fe1e94.jpg) + +![](images/59fd125d2f1a13bd4fccfe6713425a2c004cd392986e4c427eb467173796bb57.jpg) + +![](images/a927aeddf98e9e4ddcd83a2c9de8d7f68786d344059baf05973d53f2a46f7f94.jpg) + +![](images/a253375ed3cfaee7d217e3d0f9b8bbed5a508724cc1b432063d0e1b796a0d874.jpg) + +![](images/a680baf93b2b90175aa50f3beb3cff201d9162fda811c1a9da2a5e11491e4d83.jpg) +Figure 4: Phase transition in LoRA-based RL. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1. The "training turning point" in the legend means the step where the format-like metrics (e.g., format reward, completion length) start to destabilize. Refer to Appendix E for the full set of plots. + +![](images/d641c625095868eb4819266432c0c7a072fc31c2f491c3d79d392f1711d5b00e.jpg) + +Another crucial observation is the timing of optimal performance: the best-performing checkpoint, yielding the highest reasoning accuracy on held-out evaluations, consistently occurs just prior to or around this observed phase transition point in the format metrics (indicated by the red vertical dashed line). This decoupling between the dynamics of accuracy-based and format-based metrics suggests that the LoRA-based RL process rapidly optimizes the model's ability to adhere to the structural and stylistic elements rewarded by the format score and length constraints. The subsequent transition point may signify where this structural optimization saturates, becomes unstable, or perhaps begins to compromise generative quality in other ways (e.g., by overly constraining or expanding length). The fact that peak reasoning accuracy is achieved just before this format-driven transition implies that while learning the correct output format is essential and efficiently achieved via LoRA, pushing further on format-centric optimization alone does not necessarily + +yield better reasoning, and may even be detrimental. 
This reinforces our hypothesis that LoRA efficiently adapts the model by primarily learning the form required for effective reasoning. + +# 6. Conclusion + +We presented Tina to demonstrate that effective reasoning capabilities can be instilled in language models with efficiency and effectiveness. The principal contribution of Tina lies in democratizing access to RL-driven reasoning model development. By combining LoRA with RL on a 1.5B parameter base model, we achieved reasoning performance competitive with significantly larger models, accomplishing this within an estimated computational budget of only $9. This outcome prompts reflection on the factors enabling such minimalist approaches, and on their possible future trajectories. Despite encouraging results, this work is subject to certain limitations: + +Base Model Scale: Our experiments centered on a 1.5B parameter model. While showcasing cost-performance efficiency, the absolute reasoning ceiling achievable with this "tiny" model may naturally be lower for complex, multi-step reasoning problems than what larger models can offer. + +Reasoning Task Scope: Our evaluation focused primarily on mathematical and formal logic reasoning benchmarks (AIME, AMC, MATH, GPQA, Minerva). The effectiveness and transferability of the learned reasoning skills to other domains, such as coding, warrants further investigation. + +Hyperparameter Optimization: We intentionally minimized hyperparameter tuning costs by adopting established configurations. While this demonstrates a certain form of robustness to our methodology, there may be potential for further performance gains derived from additional tuning, perhaps tailored to the interplay between LoRA, the RL algorithm, and the target reasoning tasks. + +# 7. Acknowledgment + +We want to express our gratitude to the broader open-source community. 
This research was made possible by leveraging numerous publicly available resources, including training and evaluation framework, open datasets, accessible pre-trained language models, and the insights shared through technical reports. The computational resources required for the experiments described herein were provided by the Center for Advanced Research Computing (CARC) at the University of Southern California (USC). We are grateful for the support which enabled the training and evaluation of our models. J.A. was supported by the National Science Foundation Graduate Research Fellowship Program under Grant No. DGE-1842487. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the authors and do not necessarily reflect the views of the National Science Foundation. + +# References + +Pranjal Aggarwal and Sean Welleck. L1: Controlling how long a reasoning model thinks with reinforcement learning, 2025. URL https://arxiv.org/abs/2503.04697. +Zeyuan Allen-Zhu and Yuanzhi Li. Physics of language models: Part 3.3, knowledge capacity scaling laws. In Proceedings of International Conference on Learning Representations (ICLR), 2025. +Art of Problem Solving. Amc problems and solutions, 2023. URL https://artofproblemsolving.com/wiki/index.php/AMC_12_Problems_and_Solutions. +Art of Problem Solving. Aime problems and solutions, February 2024. URL https://artofproblemsolving.com/wiki/index.php/AIME_Problems_and_Solutions. +Yinlam Chow, Guy Tennenholtz, Izzeddin Gur, Vincent Zhuang, Bo Dai, Sridhar Thiagarajan, Craig Boutilier, Rishabh Agarwal, Aviral Kumar, and Aleksandra Faust. Inference-aware fine-tuning for Best-of-N sampling in large language models, 2024. URL https://arxiv.org/abs/2412.15287. +Cudo Compute. Nvidia L40S pricing. URL https://www.cudocompute.com/products/gpu-cloud/nvidia-l40s. Accessed: 2025-04-21. 
+Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, Jiarui Yuan, Huayu Chen, Kaiyan Zhang, Xingtai Lv, Shuo Wang, Yuan Yao, Xu Han, Hao Peng, Yu Cheng, Zhiyuan Liu, Maosong Sun, Bowen Zhou, and Ning Ding. Process reinforcement through implicit rewards, 2025. URL https://arxiv.org/abs/2502.01456. +Quy-Anh Dang and Chris Ngo. Reinforcement learning for reasoning in small llms: What works and what doesn't, 2025. URL https://arxiv.org/abs/2503.16219. +DeepSeek-AI. DeepSeek-R1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948. +Clémentine Fourrier, Nathan Habib, Hynek Kydlíček, Thomas Wolf, and Lewis Tunstall. Lighteval: A lightweight framework for llm evaluation, 2023. URL https://github.com/huggingface/lighteval. +Bofei Gao, Feifan Song, Zhe Yang, Zefan Cai, Yibo Miao, Qingxiu Dong, Lei Li, Chenghao Ma, Liang Chen, Runxin Xu, Zhengyang Tang, Benyou Wang, Daoguang Zan, Shanghaoran Quan, Ge Zhang, Lei Sha, Yichang Zhang, Xuancheng Ren, Tianyu Liu, and Baobao Chang. Omni-MATH: A universal olympiad level mathematic benchmark for large language models, 2024a. URL https://arxiv.org/abs/2410.07985. +Leo Gao, Jonathan Tow, Baber Abbasi, Stella Biderman, Sid Black, Anthony DiPofi, Charles Foster, Laurence Golding, Jeffrey Hsu, Alain Le Noac'h, Haonan Li, Kyle McDonell, Niklas Muennighoff, Chris Ociepa, Jason Phang, Laria Reynolds, Hailey Schoelkopf, Aviya Skowron, Lintang Sutawika, Eric Tang, Anish Thite, Ben Wang, Kevin Wang, and Andy Zou. A framework for few-shot language model evaluation, 07 2024b. URL https://zenodo.org/records/12608602. +Sylvain Gugger, Lysandre Debut, Thomas Wolf, Philipp Schmid, Zachary Mueller, Sourab Mangrulkar, Marc Sun, and Benjamin Bossan. Accelerate: Training and inference at scale made simple, efficient and adaptable., 2022. URL https://github.com/huggingface/accelerate. 
+ +Zeyu Han, Chao Gao, Jinyang Liu, Jeff Zhang, and Sai Qian Zhang. Parameter-efficient fine-tuning for large models: A comprehensive survey, 2024. URL https://arxiv.org/abs/2403.14608. +Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset, 2021. URL https://arxiv.org/abs/2103.03874. +Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuzhhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models, 2021. URL https://arxiv.org/abs/2106.09685. +Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, and Heung-Yeung Shum Xiangyu Zhang. Open-Reasoner-Zero: An open source approach to scaling reinforcement learning on the base model, 2025. URL https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero. +Zhen Huang, Haoyang Zou, Xuefeng Li, Yixiu Liu, Yuxiang Zheng, Ethan Chern, Shijie Xia, Yiwei Qin, Weizhe Yuan, and Pengfei Liu. O1 replication journey - part 2: Surpassing o1-preview through simple distillation, big progress or bitter lesson?, 2024. URL https://arxiv.org/abs/2411.16489. +Hugging Face. Open r1: A fully open reproduction of deepseek-r1, January 2025. URL https://github.com/huggingface/open-r1. +Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of Symposium on Operating Systems Principles (SOSP), 2023. +Nathan Lambert, Jacob Morrison, Valentina Pyatkin, Shengyi Huang, Hamish Ivison, Faeze Brahman, Lester James V. Miranda, Alisa Liu, Nouha Dziri, Shane Lyu, Yuling Gu, Saumya Malik, Victoria Graf, Jena D. Hwang, Jiangjiang Yang, Ronan Le Bras, Oyvind Tafjord, Chris Wilhelm, Luca Soldaini, Noah A. Smith, Yizhong Wang, Pradeep Dasigi, and Hannaneh Hajishirzi. Tulu 3: Pushing frontiers in open language model post-training, 2025. 
URL https://arxiv.org/abs/2411.15124. +Aitor Lewkowycz, Anders Andreassen, David Dohan, Ethan Dyer, Henryk Michalewski, Vinay Ramasesh, Ambrose Slone, Cem Anil, Imanol Schlag, Theo Gutman-Solo, Yuhuai Wu, Behnam Neyshabur, Guy Gur-Ari, and Vedant Misra. Solving quantitative reasoning problems with language models. In Proceedings of Advances in Neural Information Processing Systems (NeurIPS), volume 35, pages 3843-3857, 2022. +Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. NuminaMath, 2024. URL https://huggingface.co/AI-MO/NuminaMath-CoT. +Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In Proceedings of International Conference on Learning Representations (ICLR), 2023. +Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Understanding r1-zero-like training: A critical perspective, 2025. URL https://arxiv.org/abs/2503.20783. +Michael Luo, Sijun Tan, Justin Wong, Xiaoxiang Shi, William Y. Tang, Manan Roongta, Colin Cai, Jeffrey Luo, Tianjun Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. DeepScaleR: Surpassing o1-preview with a 1.5b model by scaling rl, 2025. URL https://agentica-project.com/. + +Trung Quoc Luong, Xinbo Zhang, Zhanming Jie, Peng Sun, Xiaoran Jin, and Hang Li. ReFT: Reasoning with reinforced fine-tuning, 2024. URL https://arxiv.org/abs/2401.08967. +Chengqi Lyu, Songyang Gao, Yuzhe Gu, Wenwei Zhang, Jianfei Gao, Kuikun Liu, Ziyi Wang, Shuaibin Li, Qian Zhao, Haian Huang, Weihan Cao, Jiangning Liu, Hongwei Liu, Junnan Liu, Songyang Zhang, Dahua Lin, and Kai Chen. Exploring the limit of outcome reward for learning mathematical reasoning, 2025. URL https://arxiv.org/abs/2502.06781. 
+Sourab Mangrulkar, Sylvain Gugger, Lysandre Debut, Younes Belkada, Sayak Paul, and Benjamin Bossan. PEFT: State-of-the-art parameter-efficient fine-tuning methods, 2022. URL https://github.com/huggingface/peft. +Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, Wayne Xin Zhao, Zheng Liu, Zhongyuan Wang, and Ji-Rong Wen. Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems, 2024. URL https://arxiv.org/abs/2412.09413. +Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393. +NovaSky Team. Sky-T1: Train your own o1 preview model within $450, 2025. URL https://novasky-ai.github.io/posts/sky-t1. +OpenAI. OpenAI o1 system card, 2024. URL https://arxiv.org/abs/2412.16720. +OpenThoughts Team. Open Thoughts, January 2025. URL https://open-thoughts.ai. +Debjit Paul, Mete Ismayilzada, Maxime Peyrard, Beatrix Borges, Antoine Bosselut, Robert West, and Boi Faltings. REFINER: Reasoning feedback on intermediate representations. In Proceedings of European Chapter of the ACL (EACL), pages 1100-1126, 2024. +Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. Mutual reasoning makes smaller LLMs stronger problem-solvers, 2024. URL https://arxiv.org/abs/2408.06195. +Samyam Rajbhandari, Jeff Rasley, Olatunj Ruwase, and Yuxiong He. Zero: Memory optimization towards training A trillion parameter models. CoRR, abs/1910.02054, 2019. URL http://arxiv.org/abs/1910.02054. +David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R Bowman. GPQA: A graduate-level google-proof Q&A benchmark. In Proceedings of Conference on Language Modeling (COLM), 2024. +RUCAIBox STILL Team. 
STILL-3-1.5B-preview: Enhancing slow thinking abilities of small models through reinforcement learning. 2025. URL https://github.com/RUCAIBox/Slow_Thinking_with_LLMs. +Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, Y. K. Li, Y. Wu, and Daya Guo. DeepSeekMath: Pushing the limits of mathematical reasoning in open language models, 2024. URL https://arxiv.org/abs/2402.03300. + +Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. Hybridflow: A flexible and efficient rlhf framework. In Proceedings of European Conference on Computer Systems (EuroSys), EuroSys '25, page 1279-1297. ACM, March 2025. doi: 10.1145/3689031.3696075. URL http://dx.doi.org/10.1145/3689031.3696075. +Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. Trl: Transformer reinforcement learning, 2020. URL https://github.com/huggingface/trl. +Shangshang Wang and Willie Neiswanger. LLM reasoning: Curated insights, 2025. URL https://shangshangwang.notion.site/llm-reasoning. +Zhenyu Wu, Qingkai Zeng, Zhihan Zhang, Zhaoxuan Tan, Chao Shen, and Meng Jiang. Large language models can self-correct with key condition verification. In Proceedings of Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 12846-12867, 2024. +Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, Chenyang Shao, Yuwei Yan, Qinglong Yang, Yiwen Song, Sijian Ren, Xinyuan Hu, Yu Li, Jie Feng, Chen Gao, and Yong Li. Towards large reasoning models: A survey of reinforced reasoning with large language models, 2025. URL https://arxiv.org/abs/2501.09686. +Weihao Zeng, Yuzhen Huang, Qian Liu, Wei Liu, Keqing He, Zejun Ma, and Junxian He. 
SimpleRL-Zoo: Investigating and taming zero reinforcement learning for open base models in the wild, 2025. URL https://arxiv.org/abs/2503.18892. + +# Appendix + +# A. Cost Breakdown + +This section provides further details on how training data amounts, computational cost, time cost, and performance metrics reported in this paper – particularly those presented in figures like Figures 1 and 3 – were determined and should be interpreted. + +Overall Comparison (Figure 1). For the baseline models included in Figure 1, the approximate training data amounts, computational costs (typically reported as GPU hours or total FLOPs), and training times are sourced from their respective technical reports or publications, leveraging the helpful summary provided in the Open-RS paper (Dang and Ngo, 2025). Reasoning performance scores for all models, encompassing both baselines and our Tina models, stem from results presented in Tables 2 and 3. + +Also, it is crucial to understand the scope of reported costs: + +- Epoch vs. Best Checkpoint: Costs cited for Tina and baseline models reflect the resources needed to complete a full training epoch or a predefined training run, not necessarily the minimal cost to reach the single best-performing checkpoint within that run. +- Training vs. Evaluation: Reported costs cover training only, omitting the computational expense required for model evaluation across benchmarks since such information is missing from several baseline models. + +Particularly, the $9 USD in the abstract represents the estimated cost to train the Tina model up to its best-performing checkpoint and subsequently evaluate that specific checkpoint. For context comparing potential full training runs, the cost to train a Tina model for a complete epoch is $14 USD (training only). Including evaluation costs for such a full run would increase the total to approximately $31 USD. We emphasize the $9 as representing the efficient path to the best Tina model. 
+ +FLOPs Estimation (Figure 3). The approximate training FLOPs shown in Figure 3 serve as a hardware-agnostic measure of computational work. For both Tina and baseline models, these values were estimated based on reported training durations and hardware configurations sourced from technical reports or the Open-RS summary, using standard FLOPs calculation methodologies. + +# B. Background behind Tina Training + +# B.1. GRPO Formulation + +Recall the following formulation of GRPO: For each question $q$ , GRPO samples a group $G = \{o_1, o_2, \ldots, o_G\}$ of outputs from the old policy $\pi_{\theta_{\mathrm{old}}}$ and optimizes the policy $\pi_{\theta}$ by maximizing the following objective: + +$$ +\underset { \begin{array}{c} q \sim P (Q), \\ \{o _ {i} \} _ {i = 1} ^ {G} \sim \pi_ {\theta_ {\mathrm {o l d}}} (O | q) \end{array} } {\mathbb {E}} \left[ \frac {1}{G} \sum_ {i = 1} ^ {G} \left(\min \left(\frac {\pi_ {\theta} (o _ {i} | q)}{\pi_ {\theta_ {\mathrm {o l d}}} (o _ {i} | q)} A _ {i}, \operatorname {c l i p p e d} \left(\frac {\pi_ {\theta} (o _ {i} | q)}{\pi_ {\theta_ {\mathrm {o l d}}} (o _ {i} | q)}, 1 - \epsilon , 1 + \epsilon\right) A _ {i}\right) - \beta \mathbb {D} _ {\mathrm {K L}} (\pi_ {\theta} | | \pi_ {\mathrm {r e f}})\right) \right]. +$$ + +Here $A_{i}$ denotes the advantage computed from a group of rewards $\{r_1,r_2,\dots ,r_G\}$ + +$$ +A _ {i} = \frac {r _ {i} - \mathrm {m e a n} (\{r _ {1} , r _ {2} , \ldots , r _ {G} \})}{\mathrm {s t d} (\{r _ {1} , r _ {2} , \ldots , r _ {G} \})}, +$$ + +and + +$$ +\mathbb {D} _ {\mathrm {K L}} (\pi_ {\theta} | | \pi_ {\mathrm {r e f}}) = \frac {\pi_ {\mathrm {r e f}} (o _ {i} | q)}{\pi_ {\theta} (o _ {i} | q)} - \log \frac {\pi_ {\mathrm {r e f}} (o _ {i} | q)}{\pi_ {\theta} (o _ {i} | q)} - 1. +$$ + +Note that $\epsilon$ and $\beta$ are parameters controlling the clipping range and KL penalty, respectively. + +# B.2. LoRA Formulation + +We follow the standard LoRA setup (Hu et al., 2021). 
Given a frozen pretrained weight matrix $W_0 \in \mathbb{R}^{d \times k}$ and trainable low-rank matrices $A \in \mathbb{R}^{d \times r}$ and $B \in \mathbb{R}^{r \times k}$ with $r \ll \min(d, k)$ , the original forward pass $h(x) = W_0 x$ is modified as + +$$ +\hat {h} (x) = W _ {0} x + A B x. +$$ + +We use the default LoRA implementation provided in the PEFT (Mangrulkar et al., 2022) library. + +# C. Additional Experimental Details + +# C.1. Hyperparameters + +We show our default choice of hyperparameter in Table 5 for all the LoRA-based RL experiments. + +
Tina-STILL-3-1.5B-previewLoRA
Tina-DeepScaleR-1.5B-PreviewLoRA
Tina-Open-RS{X}-{Y}LoRA
Tina-LIMR-{Z}LoRA
Tina-OpenR1LoRA
Tina-OpenThoughtsLoRA
LoRA Modulesquery, key, value, dense
LoRA Rank32
LoRA α128
LoRA Dropout0.05
AlgorithmGRPO
OptimizerAdamW
Optimizer Momentumβ1, β2 = 0.9, 0.999
Learning Rate1e-6
LR SchedulerCosine with Min LR
Warmup Ratio0.1
PrecisionBF16-mixed
Gradient Accumulation Step4
Total Train Batch Size32
Epochs1
Hardware2 × NVIDIA L40S
Max Prompt Length512
Max Completion Length3584
Number of Generation4
Vllm GPU Memory Utilization0.4
Vllm Max Model Length4608
+ +Table 5: Common hyperparameter settings. + +We also show the varied hyperparameter in Table 6 for all the LoRA-based RL experiments. Particularly, all the reward types including Accuracy, Format, Length, Cosine, Tag Count, Reasoning Steps, Repetition Penalty, are defined and implemented by the OpenR1 code repository.4 + +
ModelLoRA RankLoRA AlphaLoRA DropoutAlgorithmLearning RateReward TypeReward Weights
Tina-STILL-3-1.5B-preview-----Accuracy, Length2, 1
Tina-DeepScaleR-1.5B-Preview-----Accuracy, Format2, 1
Tina-Open-RS3-----Cosine, Format2, 1
Tina-Open-RS3-DrGRPO---DrGRPO-Cosine, Format2, 1
Tina-Open-RS2-----Accuracy, Format2, 1
Tina-Open-RS1-----Accuracy, Format2, 1
Tina-LIMR-----Accuracy, Format2, 1
Tina-LIMR-5e-6-lr----5e-6Accuracy, Format2, 1
Tina-LIMR-5e-7-lr----5e-7Accuracy, Format2, 1
Tina-LIMR-64-LoRA-rank64256---Accuracy, Format2, 1
Tina-LIMR-16-LoRA-rank1664---Accuracy, Format2, 1
Tina-LIMR-8-LoRA-rank832---Accuracy, Format2, 1
Tina-LIMR-4-LoRA-rank416---Accuracy, Format2, 1
Accuracy, Cosine, Format, Length, Tag Count, Reasoning Steps, Repetition Penalty1, 1, 1, 1, 1, 1
Tina-OpenR1-----Accuracy, Cosine, Format, Length, Tag Count, Reasoning Steps, Repetition Penalty1, 1, 1, 1, 1, 1
Tina-OpenThoughts-----
+ +Table 6: Varied hyperparameter settings where “-” means unchanged from the common settings in Table 5. + +# C.2. Evaluation Command + +The following is the evaluation command we use to combine lighteval and vLLM for performance evaluation on reasoning tasks. The MODEL_PATH should be replaced with either the local path or huggingface identifier to the model to be evaluated. TASK should be one of the six reasoning tasks including aime24, aime25, amc23, math_500, gpqa:diamond, and minerva. PATH_TO_OPEN_R1_EVALUATE_SCRIPT should be the path to the custom evaluate script provided by OpenR1. + +```shell
MODEL_ARGS="pretrained=$MODEL_PATH, dtype=float16, data_parallel_size=2, max_model_length=32768, gpu_memory_utilization=0.5, generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}"
```

```shell
lighteval vllm $MODEL_ARGS "custom|$TASK|0|0" \
--custom-tasks $PATH_TO_OPEN_R1_EVALUATE_SCRIPT \
--use-chat-template
```

# D. Full Tina Model Performance Evaluation + +In this section, we present all Tina models' detailed evaluation performance during post-training across six reasoning tasks including AIME24/25, AMC23, MATH500, GPQA and Minerva. + +
CHECKPOINT STEPS (3740 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
50030.0013.3375.0083.6035.8632.3545.02
100036.6720.0065.0084.8032.3227.9444.46
150026.6720.0070.0083.8037.3726.8444.11
200036.6730.0077.5084.6033.3326.8448.16
250033.3330.0070.0083.0035.3527.5746.54
300030.0020.0067.5082.6030.8125.7442.78
350030.0026.6767.5082.2032.3226.1044.13
+ +Table 7: Performance evaluation of Tina-STILL-3-1.5B-preview. + +
CHECKPOINT STEPS (5039 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
50030.0023.3367.5082.4039.3931.2545.65
100043.3326.6767.5086.2037.8828.6848.38
150030.0020.0080.0084.8032.8329.4146.17
200020.0026.6757.5080.6029.2924.2639.72
250013.3316.6752.5075.0031.3118.0134.47
300026.6716.6757.5078.6028.7923.1638.57
350023.3323.3362.5080.4031.8224.2640.94
400020.0020.0070.0082.0041.4127.9443.56
450023.3320.0072.5080.8034.8526.4742.99
500020.0026.6775.0080.8033.3329.4144.20
+ +Table 8: Performance evaluation of Tina-DeepScaleR-1.5B-Preview. + +
CHECKPOINT STEPS (875 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5026.6723.3375.0084.2037.3729.0445.94
10030.0030.0065.0083.0037.3729.7845.86
15036.6716.6765.0084.8027.7827.9443.14
20020.0026.6770.0083.8033.3327.9443.62
25036.6720.0065.0084.6038.3828.3145.49
30033.3326.6770.0085.2030.8130.1546.03
35040.0016.6777.5084.4039.9027.9447.74
40030.0016.6770.0082.8035.8631.2544.43
45036.6726.6770.0085.6033.8432.7247.58
50036.6723.3382.5085.2037.3731.6249.45
55026.6716.6780.0086.0035.3529.7845.75
60030.0026.6770.0084.6037.8829.7846.49
65020.0023.3380.0085.0033.3327.9444.93
70033.3313.3372.5085.0040.4031.9946.09
75033.3323.3375.0083.6031.3127.5745.69
80030.0023.3365.0084.2038.3829.0444.99
85026.6726.6775.0083.8031.8227.9445.32
+ +Table 9: Performance evaluation of Tina-0pen-RS3. + +
CHECKPOINT STEPS (875 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5033.3323.3377.5084.2038.8929.0447.72
10036.6723.3372.5084.2031.3128.6846.12
15040.0023.3372.5085.8030.3030.5147.07
20026.6723.3370.0083.8039.3929.4145.43
25046.6713.3372.5082.6031.8230.5146.24
30030.0026.6775.0084.0033.3329.0446.34
35033.3320.0075.0084.8037.3728.6846.53
40026.6716.6770.0083.2037.3727.5743.58
45043.3326.6777.5087.0036.3632.7250.60
50020.0023.3367.5084.2033.8429.4143.05
55040.0023.3372.5083.6040.9130.8848.54
60033.3320.0072.5084.2032.8330.8845.62
65033.3323.3357.5083.8034.8530.5143.89
70023.3326.6770.0082.4033.3328.6844.07
75030.0023.3372.5084.2038.8929.0446.33
80030.0026.6775.0084.4032.3229.4146.30
85026.6723.3370.0083.8035.8628.6844.72
+ +Table 10: Performance evaluation of Tina-0pen-RS2. + +
CHECKPOINT STEPS (2327 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
40033.3320.0075.0083.8031.8229.7845.62
60030.0030.0077.5084.2034.3431.6247.94
80043.3320.0080.0084.0035.3528.6848.56
100033.3320.0082.5084.4035.8629.7847.64
120036.6720.0067.5084.4037.8830.1546.10
140030.0020.0067.5083.4031.8229.7843.75
160023.3313.3365.0083.4035.8626.8441.29
180026.6720.0075.0084.2034.3427.5744.63
200030.0026.6772.5083.0036.3627.9446.08
220030.0023.3370.0081.4030.8126.4743.67
240030.0023.3367.5081.8030.3027.5743.42
+ +Table 11: Performance evaluation of Tina-0pen-RS1. + +
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
5020.0026.6767.5085.4037.8830.5144.66
10046.6720.0075.0083.8034.8530.5148.47
15026.6720.0072.5084.0037.3730.1545.12
20033.3330.0062.5083.4029.8030.8844.99
+ +Table 12: Performance evaluation of Tina-LIMR. + +
CHECKPOINT STEPS (11716 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
50030.0020.0077.5085.2033.8430.1546.12
100030.0023.3372.5085.6033.8426.6745.32
150036.6726.6775.0086.8039.9030.5149.26
200026.6723.3367.5083.2029.8031.6243.69
250030.0023.3372.5083.8033.8426.8445.05
300020.0030.0067.5084.6034.3428.3144.13
350036.6723.3367.5083.6031.3125.7444.69
+ +Table 13: Performance evaluation of Tina-0penR1. + +
CHECKPOINT STEPS (8259 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
50033.3016.6777.5084.2035.8630.1546.28
100033.3323.3380.0085.2024.7532.7246.56
150030.0023.3370.0086.0037.8829.0446.04
200030.0023.3370.0084.2033.3328.3144.86
250036.6726.6772.5084.8041.4133.0949.19
300026.6723.3375.0083.6034.3432.7245.94
350020.0016.6760.0084.2032.3226.1039.88
400033.3323.3372.5083.6038.3827.9446.51
450030.0020.0065.0085.0033.8426.8443.45
500020.0033.3365.0084.8040.9130.8845.82
+ +Table 14: Performance evaluation of Tina-OpenThoughts. + +
CHECKPOINT STEPS (875 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5033.3316.6775.0083.8037.3726.8445.50
10016.6720.0070.0083.2033.3326.4741.61
15043.3323.3380.0085.0035.3530.1549.53
20030.0023.3370.0084.0039.9028.6845.99
25033.3330.0065.0083.8034.3428.3145.80
30036.6716.6767.5084.4037.8829.7845.48
35026.6730.0075.0084.0037.8829.7847.22
40036.6723.3372.5084.4032.8327.5746.22
45036.6716.6772.5085.6029.2927.5744.72
50030.0020.0072.5085.6037.3729.4145.81
55030.0023.3377.5084.8036.8731.6247.35
60033.3326.6772.5083.8030.3028.3145.82
65026.6720.0077.5082.4037.8827.9445.40
70036.6720.0080.0083.8035.3531.2547.85
75030.0026.6775.0084.2038.8927.5747.06
80020.0030.0075.0082.4035.8628.3145.26
85023.3320.0072.5085.4036.3630.1544.62
+ +Table 15: Performance evaluation of Tina-0pen-RS3-DrGRPO. + +
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5020.0026.6767.5085.4037.8830.5144.66
10046.6720.0075.0083.8034.8530.5148.47
15026.6720.0072.5084.0037.3730.1545.12
20033.3330.0062.5083.4029.8030.8844.99
+ +Table 16: Performance evaluation of Tina-LIMR-5e-6-1r with learning rate 5e-6. + +
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5040.0013.3372.5083.0034.3429.0445.37
10043.3316.6777.5084.6034.8530.5147.91
15030.0023.3372.5086.2037.3730.5146.65
20033.3313.3370.0083.2029.2931.2543.40
+ +Table 17: Performance evaluation of Tina-LIMR-5e-7-1r with learning rate 5e-7. + +
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5020.0030.0077.5084.2038.3831.6246.95
10030.0023.3372.5084.6032.3229.7845.42
15036.6720.0070.0083.4031.8230.8845.46
20033.3320.0072.5085.0029.8029.4145.01
+ +Table 18: Performance evaluation of Tina-LIMR-64-LoRA-rank with LoRA rank 64 and alpha 512. + +
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5033.3323.3362.5084.2038.8931.2545.58
10043.3333.3370.0083.2035.3528.3148.92
15026.6716.6772.5083.4035.3529.0443.94
20036.6720.0075.0083.0039.3930.5147.43
+ +Table 19: Performance evaluation of Tina-LIMR-16-LoRA-rank with LoRA rank 16 and alpha 64. + +
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
5030.0026.6782.5083.8033.8430.5147.89
10026.6716.6772.5084.0036.8729.7844.42
15053.3320.0060.0083.2037.3730.8847.46
20023.3320.0072.5085.4032.8328.6843.86
+ +Table 20: Performance evaluation of Tina-LIMR-8-LoRA-rank with LoRA rank 8 and alpha 32. + +
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
5030.0023.3365.0085.0035.3529.7844.74
10026.6726.6772.5082.8034.8529.0445.42
15036.6720.0085.0083.8031.8229.047.72
20033.3323.3377.5085.4035.8628.3147.29
+ +Table 21: Performance evaluation of Tina-LIMR-4-LoRA-rank with LoRA rank 4 and alpha 16. + +# E. Full Tina Model Training Phase Transition + +In this section, we present all Tina models' training phase transitions along the training dynamics. Specifically, we observe clear phase transitions in the training of Tina-DeepScaleR-1.5B-Preview, Tina-STILL-3-1.5B-preview, Tina-Open-RS1, Tina-Open-RS2, Tina-Open-RS3, and Tina-Open-RS3-GRPO, as shown in Figures 5, 6, and 7. For Tina-OpenR1 and Tina-Thoughts (Figures 8 and 9), the observation is similar, except the best-performing checkpoint is achieved after the training turning point, rather than before. However, we do not observe such a transition in all Tina variants on the LIMR dataset, as shown in Figures 10, 11, and 12, possibly because its small data size leads to training periods which are too brief to extract meaningful information. + +![](images/d353ef6fc3f55dfc1422f1740441c1355ae02f826e634eac7f3afe7a8106e2b5.jpg) + +![](images/e227c2e22316cb4b9419f97f054e7fabe4740818619f91e7c8b4fe40c3152bac.jpg) + +![](images/0e2732e82b9d5d73f809168cfbc974b98f9f0044648299abe8a4b63ae8b60533.jpg) + +![](images/94c0a15fc368a7728149a7406671dbf227cdd83e77da83147f99fd9a06986856.jpg) + +![](images/14dd99703ddfbd21f42c03322ea1708b90ea5a70843aba660bc25ee705707508.jpg) +Figure 5: Phase transition in Tina-DeepScaleR-1.5B-Preview and Tina-STILL-3-1.5B-Preview. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1. 
+ +![](images/0b36de5f11dfc263cdad476fb93fe133556cacd1abefa32b7b6253470e317fec.jpg) + +![](images/bc4af8c268796a19b9e7ca41828353aa9b004373143e6b9529eeeb8ed4804fab.jpg) + +![](images/b191a5bf97faa7c45c1fee1dc686ecc9bfe74d6f70924690bd7ccbbb418b18d1.jpg) + +![](images/473a1311c96974b772b1c964513df3b56b622e527385e32bcac20487ab0f608b.jpg) + +![](images/125d835e1868d60ed3c9237be1434952fbe0d18bf4c96c75bd2bcb9459f8c2c9.jpg) + +![](images/cb82a48a302ec32484b525d394a3919acf1c3e6f8775d6f31572654513eb4bf8.jpg) +Figure 6: Phase transition in Tina-0pen-RS1 and Tina-0pen-RS2. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1. + +![](images/00890f19ec10de6d30defeffa76f84c3755edb43396146fd6ecd1a3608db87a5.jpg) + +![](images/552fe327e350e53b6edd0dae81e7b1d91bfa404f3cf9e8181fcb5171edcfe6e2.jpg) + +![](images/6860139cead7f0972829f859c4daa9a36307284fcbfceeda15ecec531e559f3c.jpg) + +![](images/496fcd61ed8d480b0945f217eb5a6f72daca5483eeed7daf40f65de2b5759fde.jpg) + +![](images/da84d29ddbc030bb2732b675abae652384353c1ddbd7c6ca78bb6eac68830c8a.jpg) + +![](images/2ada93971be1b948ee6a2f2f11c7befda25eb181ff27c2b09831de984ff64ceb.jpg) +Figure 7: Phase transition in Tina-Open-RS3 and Tina-Open-RS3-GRPO. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1. 
+ +![](images/6cab3827b7f7c4ceceade4215a0f8483d0bde3adef8952c5ce5f9ed175c0d13d.jpg) + +![](images/9cea77735d3cf6c3ebdecad0073a818a5f4f1cca13aa643d18fce481c4540429.jpg) + +![](images/a288133d71b7c9dc147e7ee5f78170f5e2d79b32a4c40cea023285708958f07c.jpg) + +![](images/4ccbe6938b7d744c676eb276e92dafd3e742f1a5f1bd949cac46ae8d3ac3be6e.jpg) + +![](images/bf53e4cf04bfe1cc8a3830d64c251087ca403806e1f7125338cfdeb5cdd2d04b.jpg) + +![](images/38be411b859adab5f6a4a1d0e537338c225d76824738d7c50323bcdde5094d26.jpg) + +![](images/35c5b458c2182b9b490a45b3816e4fc2f50b5128398cf5a2ca7d6ddf42470a1b.jpg) + +![](images/1f8ce392dcce7770903d3307c0a3c06e266cc11aacf0341e740d2ab6a0d0da40.jpg) +Figure 8: Phase transition in Tina-0penR1. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1. + +![](images/d77248ceec0597e3173ad7abb2455e9c6b9bd88d21d80b0ff7244825d3df6bc0.jpg) + +![](images/48735e2ffbf120d3300187ac8dc036e9eb73a17de816b992af60c8f0bba9dee0.jpg) + +![](images/bc9a0f1c786c904a3f69ed882f4cf22f1a6a30c05c7bf904d4badae2ee5de727.jpg) + +![](images/7687d000e20a32466588a78df3334c316590d1f20e9a7d47bf25e95f2358b2fc.jpg) + +![](images/e12f87e58a46f826adb346894e3dc14c83b821d5f0533d83abf938406e0ec4c7.jpg) + +![](images/30264a3080c01d7d363c382e51e0d8e0b3db806e01af37dd0be555d1b1175270.jpg) + +![](images/93c8ae4fc2db00f11f3c86a37f454a2dd65aef08a5cee4ddac7578f774212764.jpg) + +![](images/d0a6b65304f02a110defa4ef71b15594342b223bbbf05dc73dd44d06fd2fb35c.jpg) +Figure 9: Phase transition in Tina-OpenThoughts. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1. 
+ +![](images/1f769eabe9d3b95792ee514dca322f1a413030ec9276fa2fa355fb3eeeebcc94.jpg) + +![](images/6863227a9bb5aee1c96f10a6063b1ed571d1bf23545e337ee8f67927af9938c9.jpg) + +![](images/8e7d37997e4454107c9b3f1d5d83cd73d2c4e31cb6d40d54edf747a32eeab3c1.jpg) + +![](images/e10e05106fdbb6069713e797be132f798dd8dd77c486e0e23785c982fa11ca63.jpg) + +![](images/f82684fd818799f06d8422102e17ce5a54eb80cd17d33223ef24703ede35a673.jpg) + +![](images/94b7e9fa1ffeecc96b0ef41ea51000b26d02f97e7c1e3593294689e973f839ad.jpg) + +![](images/e26a69ce1c5d5ce240e9dc3cff8e42ad98280111c18653b9dbec50810ca60eca.jpg) + +![](images/5a9ddd7c6ab7cbde615412641ceea6c25f0f3366649e5f32dd25392dc06b6c70.jpg) +Figure 10: Phase transition in Tina-LIMR, Tina-LIMR-64-LoRA-rank and Tina-LIMR-16-LoRA-rank. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1. + +![](images/ed65f9fd40831f8b3283fbd88857fbde1efe7a1a747af3aad5020ea0ab07ecfb.jpg) + +![](images/04e83e21598d670a72734bc1836dedfcb3fa2769f649de2a33509249caabd40a.jpg) + +![](images/919f8c76c34c77a7b4d22d069a3190f1356853252c2e8e191e1d7805afc3a619.jpg) + +![](images/680477ed58ba6ca295f1e70b13709cb218a8a1b3b052246ddbe20f98c2db2562.jpg) + +![](images/15737af1ce0d1a20f369f184d8ff8cb4509c833fe136e34db3dc3a6b909812f2.jpg) + +![](images/7c0cee9452adf0935fd7975a14037e4d33ede03232890ec890460647a16fa1c2.jpg) + +![](images/e293c0018a412234e72b8b60b87a9e3dca0699425d5b9a868e7d6995c7a461f3.jpg) +Figure 11: Phase transition in Tina-LIMR-8-LoRA-rank and Tina-LIMR-4-LoRA-rank. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1. 
+ +![](images/bbe93058d5e119281dbe2a2a7453e28bb4b576cf301184abcc57d8ced53d3f62.jpg) + +![](images/f4186b7a62fdd3d663ffed363a38e286da6614aad9564a13f40b27a02c1f824c.jpg) + +![](images/40017567513cbf6d1900af9033c2f09bd1c6aef4b039218e1da821aa0c9116ef.jpg) + +![](images/74cbc8332e1e8c7a7c24513fe7fc932848cb5796a13820024616f51ed8f084a3.jpg) + +![](images/47eb8632f9c85c72c92536872469d78675758b2e5cda7141a97c301b03c3f345.jpg) + +![](images/92d66da5abf488c5b52ee99dc185b1dc09d8af4a68e2fbe8d2d7f207a0009495.jpg) +Figure 12: Phase transition in Tina-LIMR-5e-6-1r and Tina-LIMR-5e-7-1r. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1. + +![](images/9963c4c73368a08be9b4ba416a12dbe2b4ef4ec1b7263f7133125609784b4ce9.jpg) \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15777/images/00890f19ec10de6d30defeffa76f84c3755edb43396146fd6ecd1a3608db87a5.jpg b/data/2025/2504_15xxx/2504.15777/images/00890f19ec10de6d30defeffa76f84c3755edb43396146fd6ecd1a3608db87a5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..efab61e83dca47fd2bee1b2413311df059f1a3a2 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/00890f19ec10de6d30defeffa76f84c3755edb43396146fd6ecd1a3608db87a5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31288dd990e8260503221efed1007fe1c1c3bb9c82cb04a6461cb44dac9d430a +size 40929 diff --git a/data/2025/2504_15xxx/2504.15777/images/04e83e21598d670a72734bc1836dedfcb3fa2769f649de2a33509249caabd40a.jpg b/data/2025/2504_15xxx/2504.15777/images/04e83e21598d670a72734bc1836dedfcb3fa2769f649de2a33509249caabd40a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c529c8d022faa38910a285cba06e1f32a2d3f03a --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/04e83e21598d670a72734bc1836dedfcb3fa2769f649de2a33509249caabd40a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e5445de669c7d07c4bd49de45a7a3c712874918ff6a60900d6d3f24cf86b81bd +size 22910 diff --git a/data/2025/2504_15xxx/2504.15777/images/0b36de5f11dfc263cdad476fb93fe133556cacd1abefa32b7b6253470e317fec.jpg b/data/2025/2504_15xxx/2504.15777/images/0b36de5f11dfc263cdad476fb93fe133556cacd1abefa32b7b6253470e317fec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..35cd4967a1642de943692f780a90aca30bad6d9d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/0b36de5f11dfc263cdad476fb93fe133556cacd1abefa32b7b6253470e317fec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35e213044fbfd27a786af5ecc46d64ddf89126f9ab95ab6874086df6cadcb8e0 +size 45209 diff --git a/data/2025/2504_15xxx/2504.15777/images/0e2732e82b9d5d73f809168cfbc974b98f9f0044648299abe8a4b63ae8b60533.jpg b/data/2025/2504_15xxx/2504.15777/images/0e2732e82b9d5d73f809168cfbc974b98f9f0044648299abe8a4b63ae8b60533.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a629223b5be4bdf832486b88baf668a991aa9fd3 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/0e2732e82b9d5d73f809168cfbc974b98f9f0044648299abe8a4b63ae8b60533.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e94d97e66bb9916d5746d0acaafce3f3c7af77e46a911c29e9ab301b7a7b517d +size 43696 diff --git a/data/2025/2504_15xxx/2504.15777/images/125d835e1868d60ed3c9237be1434952fbe0d18bf4c96c75bd2bcb9459f8c2c9.jpg b/data/2025/2504_15xxx/2504.15777/images/125d835e1868d60ed3c9237be1434952fbe0d18bf4c96c75bd2bcb9459f8c2c9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f9228e7059d82972ab2a374a053d13df5152818e --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/125d835e1868d60ed3c9237be1434952fbe0d18bf4c96c75bd2bcb9459f8c2c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:40d1c03e458ece44685760527f57fb55d17f8358ddf2d669cc41a5b9dc8c4b27 +size 40009 diff --git 
a/data/2025/2504_15xxx/2504.15777/images/14dd99703ddfbd21f42c03322ea1708b90ea5a70843aba660bc25ee705707508.jpg b/data/2025/2504_15xxx/2504.15777/images/14dd99703ddfbd21f42c03322ea1708b90ea5a70843aba660bc25ee705707508.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4b7f2518386ed1718bf2b75937e48937bfce21b7 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/14dd99703ddfbd21f42c03322ea1708b90ea5a70843aba660bc25ee705707508.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2cd0352ef0b8d08699ee29b2b29bf8738dc03734821f1b80b47709677d9151d9 +size 42744 diff --git a/data/2025/2504_15xxx/2504.15777/images/15737af1ce0d1a20f369f184d8ff8cb4509c833fe136e34db3dc3a6b909812f2.jpg b/data/2025/2504_15xxx/2504.15777/images/15737af1ce0d1a20f369f184d8ff8cb4509c833fe136e34db3dc3a6b909812f2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..41473dd6209d2c661435a7ee9b73f094e05cefd1 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/15737af1ce0d1a20f369f184d8ff8cb4509c833fe136e34db3dc3a6b909812f2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8907e5a1872bbae8ff5d8105283fc82d2e201e3c9d0717ea5cc962ec054fd2f +size 35854 diff --git a/data/2025/2504_15xxx/2504.15777/images/1a3b1780e0c1635d42f9fb927eb80189e6d0502e9d868b337a308db1217364ff.jpg b/data/2025/2504_15xxx/2504.15777/images/1a3b1780e0c1635d42f9fb927eb80189e6d0502e9d868b337a308db1217364ff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..54672a5d825f95153cd27e753e30f51443c10c90 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/1a3b1780e0c1635d42f9fb927eb80189e6d0502e9d868b337a308db1217364ff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:faec30fc909a6ff74242f874b438e746360cd2533a0f5f18fb030635d06f0064 +size 44999 diff --git a/data/2025/2504_15xxx/2504.15777/images/1ee0c6d37cc65fe717551ff9bea8909244dc02f1b5139c065c55b0e684f3a275.jpg 
b/data/2025/2504_15xxx/2504.15777/images/1ee0c6d37cc65fe717551ff9bea8909244dc02f1b5139c065c55b0e684f3a275.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8eb8871dbb027037c3bb6beddad34532a1f02492 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/1ee0c6d37cc65fe717551ff9bea8909244dc02f1b5139c065c55b0e684f3a275.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d062eaa88bc6b1ba519fd5972a16e6cc8180ce1848c01ccaa46a1cb622f097eb +size 91266 diff --git a/data/2025/2504_15xxx/2504.15777/images/1f769eabe9d3b95792ee514dca322f1a413030ec9276fa2fa355fb3eeeebcc94.jpg b/data/2025/2504_15xxx/2504.15777/images/1f769eabe9d3b95792ee514dca322f1a413030ec9276fa2fa355fb3eeeebcc94.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c8b9618e5783daa9dd661c5aad8f437b575edda8 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/1f769eabe9d3b95792ee514dca322f1a413030ec9276fa2fa355fb3eeeebcc94.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ac4408695faaa8330ae74bea3eae6b397250fa8da0ae99677b9e3a364da8ce2 +size 44707 diff --git a/data/2025/2504_15xxx/2504.15777/images/1f8ce392dcce7770903d3307c0a3c06e266cc11aacf0341e740d2ab6a0d0da40.jpg b/data/2025/2504_15xxx/2504.15777/images/1f8ce392dcce7770903d3307c0a3c06e266cc11aacf0341e740d2ab6a0d0da40.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f40ef4d7e8ce5035e47d8f3323af24569ca4cb50 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/1f8ce392dcce7770903d3307c0a3c06e266cc11aacf0341e740d2ab6a0d0da40.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd84d32f7eeea081ce8ac7b9251775a6d5c4178c650a8164e0c5afa0d3476bd7 +size 37693 diff --git a/data/2025/2504_15xxx/2504.15777/images/259a17837b341a15b4289b19971555d0291ddd0ca62202c001f2e7d7e7a57c66.jpg b/data/2025/2504_15xxx/2504.15777/images/259a17837b341a15b4289b19971555d0291ddd0ca62202c001f2e7d7e7a57c66.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..6c44e7a1e5cfed2677489825a230f73a28a3e5af --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/259a17837b341a15b4289b19971555d0291ddd0ca62202c001f2e7d7e7a57c66.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:869ce83aa044ca46df139ac7324ebe12e8d2f10051fa05d4ba9d97bdfa295615 +size 72796 diff --git a/data/2025/2504_15xxx/2504.15777/images/2863c059feaad5bb8d161330289c274b045111a2d87ecc5977c5da81987d3f33.jpg b/data/2025/2504_15xxx/2504.15777/images/2863c059feaad5bb8d161330289c274b045111a2d87ecc5977c5da81987d3f33.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3793d3c6ca1eac5bb3b76c433fa6b3fedfb1ccf1 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/2863c059feaad5bb8d161330289c274b045111a2d87ecc5977c5da81987d3f33.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a30a66eab77911bc79d569881be4050aada5abbaa3a2043d76828ac2d5e23bbc +size 48232 diff --git a/data/2025/2504_15xxx/2504.15777/images/28a2994065a07ca1b55ef8c44cddfb26dc2e52400324d7f5375371de6576060e.jpg b/data/2025/2504_15xxx/2504.15777/images/28a2994065a07ca1b55ef8c44cddfb26dc2e52400324d7f5375371de6576060e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..779a1a49eeb08a7e8ca51296d589b211cd644bdf --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/28a2994065a07ca1b55ef8c44cddfb26dc2e52400324d7f5375371de6576060e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f6bc03978fb9a061e88d4d4d1852b8680068be4ba4ed3667d4097c3ab448a44 +size 3300 diff --git a/data/2025/2504_15xxx/2504.15777/images/2ada93971be1b948ee6a2f2f11c7befda25eb181ff27c2b09831de984ff64ceb.jpg b/data/2025/2504_15xxx/2504.15777/images/2ada93971be1b948ee6a2f2f11c7befda25eb181ff27c2b09831de984ff64ceb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4e869832e9830ee5de7735726bdaadc0748e720d --- /dev/null +++ 
b/data/2025/2504_15xxx/2504.15777/images/2ada93971be1b948ee6a2f2f11c7befda25eb181ff27c2b09831de984ff64ceb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c9005762572d26b6745190115c36f1f1af812b2b7f3037a1831cdbdd2ed6e96 +size 42557 diff --git a/data/2025/2504_15xxx/2504.15777/images/2d53ec46e288acc29b44cc0d655077b12bf43b901a29c7b20ed7cb6a8828df61.jpg b/data/2025/2504_15xxx/2504.15777/images/2d53ec46e288acc29b44cc0d655077b12bf43b901a29c7b20ed7cb6a8828df61.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e47cc84622bf86df372ec0bee248293096378dc7 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/2d53ec46e288acc29b44cc0d655077b12bf43b901a29c7b20ed7cb6a8828df61.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbf6d85a86078a735acf824cfc201c8a2b318eb7dc10b758147738216c8ac4bb +size 21779 diff --git a/data/2025/2504_15xxx/2504.15777/images/2f6aefc175134f265b1b159383b902b03d40a9cce69d5e4661266307f817b9d4.jpg b/data/2025/2504_15xxx/2504.15777/images/2f6aefc175134f265b1b159383b902b03d40a9cce69d5e4661266307f817b9d4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..61b7bb6bc06f46f1b74b0aac79bfe29d82419168 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/2f6aefc175134f265b1b159383b902b03d40a9cce69d5e4661266307f817b9d4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11e8075769ce82b1c56d52d2ddbe53b0af7758aa310d7f678141f2a973085700 +size 47964 diff --git a/data/2025/2504_15xxx/2504.15777/images/30264a3080c01d7d363c382e51e0d8e0b3db806e01af37dd0be555d1b1175270.jpg b/data/2025/2504_15xxx/2504.15777/images/30264a3080c01d7d363c382e51e0d8e0b3db806e01af37dd0be555d1b1175270.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fdc2a2230f7a6984672575c79e3d46a07e6d96de --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/30264a3080c01d7d363c382e51e0d8e0b3db806e01af37dd0be555d1b1175270.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:9ba3950e8d8c066eb855ff3aa68c68636a0454c3e215574b41440353f84af3c2 +size 42845 diff --git a/data/2025/2504_15xxx/2504.15777/images/34438dc604081a53d0f3b2eb9a2446c6116792b8fa9876c28dc8d52561641a4a.jpg b/data/2025/2504_15xxx/2504.15777/images/34438dc604081a53d0f3b2eb9a2446c6116792b8fa9876c28dc8d52561641a4a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..099327e548f525cc30c40f9377aca9c3cac0f771 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/34438dc604081a53d0f3b2eb9a2446c6116792b8fa9876c28dc8d52561641a4a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35af3f506c5725d182edab701d2979dd623fb875e6f1932079c06589d1542698 +size 145250 diff --git a/data/2025/2504_15xxx/2504.15777/images/35c5b458c2182b9b490a45b3816e4fc2f50b5128398cf5a2ca7d6ddf42470a1b.jpg b/data/2025/2504_15xxx/2504.15777/images/35c5b458c2182b9b490a45b3816e4fc2f50b5128398cf5a2ca7d6ddf42470a1b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..95756e3d7285c4fcfe410008835211d6ab37688b --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/35c5b458c2182b9b490a45b3816e4fc2f50b5128398cf5a2ca7d6ddf42470a1b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:976bc38d8f72ebebb5d79692b97594a4a017f76cd02f0bace511608095d9b757 +size 47729 diff --git a/data/2025/2504_15xxx/2504.15777/images/38be411b859adab5f6a4a1d0e537338c225d76824738d7c50323bcdde5094d26.jpg b/data/2025/2504_15xxx/2504.15777/images/38be411b859adab5f6a4a1d0e537338c225d76824738d7c50323bcdde5094d26.jpg new file mode 100644 index 0000000000000000000000000000000000000000..483187998aa5570ff4d7cc211ecf04d7ac689fa3 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/38be411b859adab5f6a4a1d0e537338c225d76824738d7c50323bcdde5094d26.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2453486dd75e39c46b7f45b571d2a9e59f7148df894dd7f27d9904510bbb376c +size 42998 diff --git 
a/data/2025/2504_15xxx/2504.15777/images/3bceae34b8cb8c4220f13aedaf6a0d44948a79c1bbfe8ae7d3cc92a35320ed29.jpg b/data/2025/2504_15xxx/2504.15777/images/3bceae34b8cb8c4220f13aedaf6a0d44948a79c1bbfe8ae7d3cc92a35320ed29.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6c5baea8dc743f74ed688094c2d35598e42ceb2b --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/3bceae34b8cb8c4220f13aedaf6a0d44948a79c1bbfe8ae7d3cc92a35320ed29.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8cd410db6694a0f59f7af83d9e14f7249540f0eb3c1c63471c45d03ac4cb97d7 +size 47656 diff --git a/data/2025/2504_15xxx/2504.15777/images/3f7a8c1ad3cb2877b0af8755abcdcfe54a8790edb795d23763fd06181cd40a01.jpg b/data/2025/2504_15xxx/2504.15777/images/3f7a8c1ad3cb2877b0af8755abcdcfe54a8790edb795d23763fd06181cd40a01.jpg new file mode 100644 index 0000000000000000000000000000000000000000..399191910da19db3046110a8ac0a92046dac3680 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/3f7a8c1ad3cb2877b0af8755abcdcfe54a8790edb795d23763fd06181cd40a01.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df85fadbe35f6881701494cc7a325b583e5fc99c6070f2011976b9a5d78219d2 +size 47703 diff --git a/data/2025/2504_15xxx/2504.15777/images/40017567513cbf6d1900af9033c2f09bd1c6aef4b039218e1da821aa0c9116ef.jpg b/data/2025/2504_15xxx/2504.15777/images/40017567513cbf6d1900af9033c2f09bd1c6aef4b039218e1da821aa0c9116ef.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87a39e3eb4e9a1ca3e111bda9e3463a7dedea528 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/40017567513cbf6d1900af9033c2f09bd1c6aef4b039218e1da821aa0c9116ef.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ee366fc20515c4111f1ddedb23f93b7685148f83b964e2835e6f3da4494ed1d +size 34233 diff --git a/data/2025/2504_15xxx/2504.15777/images/473a1311c96974b772b1c964513df3b56b622e527385e32bcac20487ab0f608b.jpg 
b/data/2025/2504_15xxx/2504.15777/images/473a1311c96974b772b1c964513df3b56b622e527385e32bcac20487ab0f608b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1199b15941c8c1cb546486316c63de3dbfa65058 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/473a1311c96974b772b1c964513df3b56b622e527385e32bcac20487ab0f608b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9139f2c1524cdf8478595401d3a6ef4be24d984bc5e5ec6daa2f4386eda3082 +size 40828 diff --git a/data/2025/2504_15xxx/2504.15777/images/47eb8632f9c85c72c92536872469d78675758b2e5cda7141a97c301b03c3f345.jpg b/data/2025/2504_15xxx/2504.15777/images/47eb8632f9c85c72c92536872469d78675758b2e5cda7141a97c301b03c3f345.jpg new file mode 100644 index 0000000000000000000000000000000000000000..db62e7b06f46fb59e857b45331834daa3df4b6ff --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/47eb8632f9c85c72c92536872469d78675758b2e5cda7141a97c301b03c3f345.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10c5ddd71f4b029011e16fb91e06e6edfadc59ff76a042e78f678f73b742a4b3 +size 35014 diff --git a/data/2025/2504_15xxx/2504.15777/images/48735e2ffbf120d3300187ac8dc036e9eb73a17de816b992af60c8f0bba9dee0.jpg b/data/2025/2504_15xxx/2504.15777/images/48735e2ffbf120d3300187ac8dc036e9eb73a17de816b992af60c8f0bba9dee0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc01bc174cd2766647c6245d842e87882f293ff3 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/48735e2ffbf120d3300187ac8dc036e9eb73a17de816b992af60c8f0bba9dee0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b75835ec662f60b36e519ae947007c0064dbf4044da1c8f09f334e442747120 +size 42062 diff --git a/data/2025/2504_15xxx/2504.15777/images/489481ae846805813de3af37b2eba7e7c9d549e8a430de5eda6c253bfc5d0a50.jpg b/data/2025/2504_15xxx/2504.15777/images/489481ae846805813de3af37b2eba7e7c9d549e8a430de5eda6c253bfc5d0a50.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..74f1ed3a195009ee6c93f41ec5ee4c14a795380b --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/489481ae846805813de3af37b2eba7e7c9d549e8a430de5eda6c253bfc5d0a50.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17a0aa126e92a4d6630b5eceb2a43ed4675660796fb76a9f05a181db9da9866d +size 72083 diff --git a/data/2025/2504_15xxx/2504.15777/images/496fcd61ed8d480b0945f217eb5a6f72daca5483eeed7daf40f65de2b5759fde.jpg b/data/2025/2504_15xxx/2504.15777/images/496fcd61ed8d480b0945f217eb5a6f72daca5483eeed7daf40f65de2b5759fde.jpg new file mode 100644 index 0000000000000000000000000000000000000000..48cefc6e03268c5802c7dbdd0d8a8cae1e65ba55 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/496fcd61ed8d480b0945f217eb5a6f72daca5483eeed7daf40f65de2b5759fde.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a49a3d1cb81a5ef5a53e0a4a8c66e39e314099880d435589ec2ac6345a4c7d56 +size 41905 diff --git a/data/2025/2504_15xxx/2504.15777/images/4ca7537e6fe64a4ec131c6ed4f8cf75a1a629a084b6eeac69db32310ad2ec862.jpg b/data/2025/2504_15xxx/2504.15777/images/4ca7537e6fe64a4ec131c6ed4f8cf75a1a629a084b6eeac69db32310ad2ec862.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9983c61cd10041cada25d86d4492acbce70f7186 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/4ca7537e6fe64a4ec131c6ed4f8cf75a1a629a084b6eeac69db32310ad2ec862.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c613a873d386ba7b4a70ff7bca3b2e495326aed12fbc2ea2493c0242a83d836 +size 145993 diff --git a/data/2025/2504_15xxx/2504.15777/images/4ccbe6938b7d744c676eb276e92dafd3e742f1a5f1bd949cac46ae8d3ac3be6e.jpg b/data/2025/2504_15xxx/2504.15777/images/4ccbe6938b7d744c676eb276e92dafd3e742f1a5f1bd949cac46ae8d3ac3be6e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7e30277231a66c6f76773fdf4ab5784aae5accb1 --- /dev/null +++ 
b/data/2025/2504_15xxx/2504.15777/images/4ccbe6938b7d744c676eb276e92dafd3e742f1a5f1bd949cac46ae8d3ac3be6e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:424c79395ed0cad7fc5a7383ae684f89e29495f452c9dfa0df0cf4ce67a49c08 +size 30004 diff --git a/data/2025/2504_15xxx/2504.15777/images/4e7e960fb8433289bbb35bd1a4c84458c12331f5c0c13f4f336813af762fbaef.jpg b/data/2025/2504_15xxx/2504.15777/images/4e7e960fb8433289bbb35bd1a4c84458c12331f5c0c13f4f336813af762fbaef.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5a2367c023455c470fd6f8127ecd7e98057b9d06 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/4e7e960fb8433289bbb35bd1a4c84458c12331f5c0c13f4f336813af762fbaef.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:147a8724062b73a48334a5c7b2da6ffde5aae1391ac86d4e2dd895ccc1edb1f1 +size 69154 diff --git a/data/2025/2504_15xxx/2504.15777/images/552fe327e350e53b6edd0dae81e7b1d91bfa404f3cf9e8181fcb5171edcfe6e2.jpg b/data/2025/2504_15xxx/2504.15777/images/552fe327e350e53b6edd0dae81e7b1d91bfa404f3cf9e8181fcb5171edcfe6e2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..204d5706e792322e26a21085993936ae5ac5f5ed --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/552fe327e350e53b6edd0dae81e7b1d91bfa404f3cf9e8181fcb5171edcfe6e2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:caec71926fdf9a729947d33847f8f5386e204a60a5c33abbdca5c4a585a96fde +size 40955 diff --git a/data/2025/2504_15xxx/2504.15777/images/56c0a1d34fc3c6892dc823d2eada4025b45a291b64ab9e854d5acd3cf67be71c.jpg b/data/2025/2504_15xxx/2504.15777/images/56c0a1d34fc3c6892dc823d2eada4025b45a291b64ab9e854d5acd3cf67be71c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ed8d5dcc5d012882055f3724800a1f2428f5faad --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/56c0a1d34fc3c6892dc823d2eada4025b45a291b64ab9e854d5acd3cf67be71c.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:3ad046df7df940baafb7ef4ae2aafe0d4e7969d9380fa7f886fd5143781ebb74 +size 80126 diff --git a/data/2025/2504_15xxx/2504.15777/images/57521b0bd22d4166810982ee97c9c205d00f9dd5807c6052a1f6dc3adb4e9981.jpg b/data/2025/2504_15xxx/2504.15777/images/57521b0bd22d4166810982ee97c9c205d00f9dd5807c6052a1f6dc3adb4e9981.jpg new file mode 100644 index 0000000000000000000000000000000000000000..06e2cbe8ce5162a762b471336a6853b1038bf763 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/57521b0bd22d4166810982ee97c9c205d00f9dd5807c6052a1f6dc3adb4e9981.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1be25b4efacd2c4fb64b656e89086de0f7e149c5337299ba25e2eb4391b0813 +size 7307 diff --git a/data/2025/2504_15xxx/2504.15777/images/59fd125d2f1a13bd4fccfe6713425a2c004cd392986e4c427eb467173796bb57.jpg b/data/2025/2504_15xxx/2504.15777/images/59fd125d2f1a13bd4fccfe6713425a2c004cd392986e4c427eb467173796bb57.jpg new file mode 100644 index 0000000000000000000000000000000000000000..26f97c0b36e2c3ec891da9d97d79443b8e1d87d2 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/59fd125d2f1a13bd4fccfe6713425a2c004cd392986e4c427eb467173796bb57.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2be28c270f8e92b3290c85e70fff0d6a05f880280fd151c96a7d2e864b86c4dd +size 39443 diff --git a/data/2025/2504_15xxx/2504.15777/images/5a9ddd7c6ab7cbde615412641ceea6c25f0f3366649e5f32dd25392dc06b6c70.jpg b/data/2025/2504_15xxx/2504.15777/images/5a9ddd7c6ab7cbde615412641ceea6c25f0f3366649e5f32dd25392dc06b6c70.jpg new file mode 100644 index 0000000000000000000000000000000000000000..43fb53d81080b9d1baa15876f2cad4a8761ca04a --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/5a9ddd7c6ab7cbde615412641ceea6c25f0f3366649e5f32dd25392dc06b6c70.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ae4530c6deeb0ea37e5e62e3b99f992c05b186c18e148cf83bf74218fd2cbb4 +size 23609 diff --git 
a/data/2025/2504_15xxx/2504.15777/images/5ca376c85eec1315968cb1b29eabeab870f772630b1bec03adc8b5ab0477fc7f.jpg b/data/2025/2504_15xxx/2504.15777/images/5ca376c85eec1315968cb1b29eabeab870f772630b1bec03adc8b5ab0477fc7f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f036a709c06b1b7deafc9642d00620f2cf49411d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/5ca376c85eec1315968cb1b29eabeab870f772630b1bec03adc8b5ab0477fc7f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86b6c4e136e8019f6556577daf7cdc83c3842b6ca0f59b8a1584c95f78799885 +size 45868 diff --git a/data/2025/2504_15xxx/2504.15777/images/680477ed58ba6ca295f1e70b13709cb218a8a1b3b052246ddbe20f98c2db2562.jpg b/data/2025/2504_15xxx/2504.15777/images/680477ed58ba6ca295f1e70b13709cb218a8a1b3b052246ddbe20f98c2db2562.jpg new file mode 100644 index 0000000000000000000000000000000000000000..45d0155b0b6e070ea228fedf49d6fc05409a6e56 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/680477ed58ba6ca295f1e70b13709cb218a8a1b3b052246ddbe20f98c2db2562.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09f3a2f8beb2cf4b738f7e85b10bba9dd176e8f2227a9d85568fa2b0caf8545a +size 35870 diff --git a/data/2025/2504_15xxx/2504.15777/images/6860139cead7f0972829f859c4daa9a36307284fcbfceeda15ecec531e559f3c.jpg b/data/2025/2504_15xxx/2504.15777/images/6860139cead7f0972829f859c4daa9a36307284fcbfceeda15ecec531e559f3c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..68da5a16d281d0b42a983b9fd229236cb9ca7b7d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/6860139cead7f0972829f859c4daa9a36307284fcbfceeda15ecec531e559f3c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e17f510e25147687502d83f7f2d5fb564d761bb9648b6035bd3053c8ac6ded8 +size 38973 diff --git a/data/2025/2504_15xxx/2504.15777/images/6863227a9bb5aee1c96f10a6063b1ed571d1bf23545e337ee8f67927af9938c9.jpg 
b/data/2025/2504_15xxx/2504.15777/images/6863227a9bb5aee1c96f10a6063b1ed571d1bf23545e337ee8f67927af9938c9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5c6989c5f56346f44540f41dcdb928bd06c176d9 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/6863227a9bb5aee1c96f10a6063b1ed571d1bf23545e337ee8f67927af9938c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c42ad7dc29e22afaede1089c5d64238cf5ee99e194aaea5eb44d301402b121ba +size 21268 diff --git a/data/2025/2504_15xxx/2504.15777/images/6c6d44847f89dd7f528dfee2e033953d575eac2bf14e954d1c4d9a807378e5d6.jpg b/data/2025/2504_15xxx/2504.15777/images/6c6d44847f89dd7f528dfee2e033953d575eac2bf14e954d1c4d9a807378e5d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a125c62afd407cb6aec845716e3dcbc0cb50f1e6 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/6c6d44847f89dd7f528dfee2e033953d575eac2bf14e954d1c4d9a807378e5d6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b42e6c431c31066a17cba7a7939f67c91b021445c23adf67dec5504bcef2458 +size 14887 diff --git a/data/2025/2504_15xxx/2504.15777/images/6cab3827b7f7c4ceceade4215a0f8483d0bde3adef8952c5ce5f9ed175c0d13d.jpg b/data/2025/2504_15xxx/2504.15777/images/6cab3827b7f7c4ceceade4215a0f8483d0bde3adef8952c5ce5f9ed175c0d13d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0ed09f012be2601ecef868708d19ac736a54c15b --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/6cab3827b7f7c4ceceade4215a0f8483d0bde3adef8952c5ce5f9ed175c0d13d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d94dad4791068b23d58549a82bcda78e8309995225785447efd3f8ec12affd9 +size 42170 diff --git a/data/2025/2504_15xxx/2504.15777/images/74cbc8332e1e8c7a7c24513fe7fc932848cb5796a13820024616f51ed8f084a3.jpg b/data/2025/2504_15xxx/2504.15777/images/74cbc8332e1e8c7a7c24513fe7fc932848cb5796a13820024616f51ed8f084a3.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..10f2e90418015e18136b58b622dcf764f56a58cb --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/74cbc8332e1e8c7a7c24513fe7fc932848cb5796a13820024616f51ed8f084a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90ea905c0e8fa12db52c3c23c8d260c2292fdbc1b7ec5b23cd3bbeb1909bace3 +size 35924 diff --git a/data/2025/2504_15xxx/2504.15777/images/7687d000e20a32466588a78df3334c316590d1f20e9a7d47bf25e95f2358b2fc.jpg b/data/2025/2504_15xxx/2504.15777/images/7687d000e20a32466588a78df3334c316590d1f20e9a7d47bf25e95f2358b2fc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d35269fd9b2ef5e57956bcb94d28996665187fc1 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/7687d000e20a32466588a78df3334c316590d1f20e9a7d47bf25e95f2358b2fc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:26480ae290665522d9653f53fafeeb3398113b6eb73835320c0495ef7d15c0a8 +size 30626 diff --git a/data/2025/2504_15xxx/2504.15777/images/7b908d3846cc3b7d51a45a7110eb4424a76ec798139377eb728451a8b9a8038e.jpg b/data/2025/2504_15xxx/2504.15777/images/7b908d3846cc3b7d51a45a7110eb4424a76ec798139377eb728451a8b9a8038e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ae74774af301d16b223f8687fd57d90c673a2e3 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/7b908d3846cc3b7d51a45a7110eb4424a76ec798139377eb728451a8b9a8038e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:62be97bc722b1176359920ffd2771e3fabb160371b83a1a1606f6349e3edb661 +size 248521 diff --git a/data/2025/2504_15xxx/2504.15777/images/7c0cee9452adf0935fd7975a14037e4d33ede03232890ec890460647a16fa1c2.jpg b/data/2025/2504_15xxx/2504.15777/images/7c0cee9452adf0935fd7975a14037e4d33ede03232890ec890460647a16fa1c2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b5796ec3ab76c21760dbc142d1b26b8d68b52dcf --- /dev/null +++ 
b/data/2025/2504_15xxx/2504.15777/images/7c0cee9452adf0935fd7975a14037e4d33ede03232890ec890460647a16fa1c2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8789c881565b12de5dd83463e9ba2ab2f4c7cd3a629950fdad21109ad02f300f +size 36186 diff --git a/data/2025/2504_15xxx/2504.15777/images/8360271f6e05414bddb8422616944a29e0ee5c6272108bd1fee1ef9fbeefeb86.jpg b/data/2025/2504_15xxx/2504.15777/images/8360271f6e05414bddb8422616944a29e0ee5c6272108bd1fee1ef9fbeefeb86.jpg new file mode 100644 index 0000000000000000000000000000000000000000..271019684da068459627cddd79fc130ab1b63470 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/8360271f6e05414bddb8422616944a29e0ee5c6272108bd1fee1ef9fbeefeb86.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1b7bdd07c6ce2dd1207144df62e32e918340fa12458c4f64b852554adf55b58 +size 101789 diff --git a/data/2025/2504_15xxx/2504.15777/images/87ea49257f6941cd6e4541f776c070114d4a35cf437f8e9258c5111a25444c07.jpg b/data/2025/2504_15xxx/2504.15777/images/87ea49257f6941cd6e4541f776c070114d4a35cf437f8e9258c5111a25444c07.jpg new file mode 100644 index 0000000000000000000000000000000000000000..396dafc097cb8d5f2b853bdf8b55ddb71621c38f --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/87ea49257f6941cd6e4541f776c070114d4a35cf437f8e9258c5111a25444c07.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fd91af88a42bcbf9d09ff4b1212db10869df16e955e68209adafffca3b30ed4 +size 48724 diff --git a/data/2025/2504_15xxx/2504.15777/images/8e7d37997e4454107c9b3f1d5d83cd73d2c4e31cb6d40d54edf747a32eeab3c1.jpg b/data/2025/2504_15xxx/2504.15777/images/8e7d37997e4454107c9b3f1d5d83cd73d2c4e31cb6d40d54edf747a32eeab3c1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..47d9467b4a5497cf3a749e397486922acb4c168f --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/8e7d37997e4454107c9b3f1d5d83cd73d2c4e31cb6d40d54edf747a32eeab3c1.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:529ea1644dd54f68192ec05c0d1c02410fa744e40e2f89dd5a2941718da70518 +size 21683 diff --git a/data/2025/2504_15xxx/2504.15777/images/919f8c76c34c77a7b4d22d069a3190f1356853252c2e8e191e1d7805afc3a619.jpg b/data/2025/2504_15xxx/2504.15777/images/919f8c76c34c77a7b4d22d069a3190f1356853252c2e8e191e1d7805afc3a619.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f67137cef97be72d75abacc3d7dead8524de06f --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/919f8c76c34c77a7b4d22d069a3190f1356853252c2e8e191e1d7805afc3a619.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7a522e36e219295e376ad2322e84b4db36e05801463c9d089e2adf11936c989 +size 35388 diff --git a/data/2025/2504_15xxx/2504.15777/images/92d66da5abf488c5b52ee99dc185b1dc09d8af4a68e2fbe8d2d7f207a0009495.jpg b/data/2025/2504_15xxx/2504.15777/images/92d66da5abf488c5b52ee99dc185b1dc09d8af4a68e2fbe8d2d7f207a0009495.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a6eaa86b4ca78e7fd88a5e6180b1515210d4c285 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/92d66da5abf488c5b52ee99dc185b1dc09d8af4a68e2fbe8d2d7f207a0009495.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7de4e8cd85ff9bc93247bd5529c5c599b74b65f021668969381f1d49ef7b3bf +size 36333 diff --git a/data/2025/2504_15xxx/2504.15777/images/93c8ae4fc2db00f11f3c86a37f454a2dd65aef08a5cee4ddac7578f774212764.jpg b/data/2025/2504_15xxx/2504.15777/images/93c8ae4fc2db00f11f3c86a37f454a2dd65aef08a5cee4ddac7578f774212764.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1ae00fbbd48f3a16338cf5e515c408f1dad878b9 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/93c8ae4fc2db00f11f3c86a37f454a2dd65aef08a5cee4ddac7578f774212764.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:875dc8968933782c8f01b504ceab1c922efc51fcf2fbb6c643b12e867272aaef +size 44307 diff --git 
a/data/2025/2504_15xxx/2504.15777/images/94b7e9fa1ffeecc96b0ef41ea51000b26d02f97e7c1e3593294689e973f839ad.jpg b/data/2025/2504_15xxx/2504.15777/images/94b7e9fa1ffeecc96b0ef41ea51000b26d02f97e7c1e3593294689e973f839ad.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d7adac756cf8e816dc068bc942a3d5e639587b2 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/94b7e9fa1ffeecc96b0ef41ea51000b26d02f97e7c1e3593294689e973f839ad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:932fc05ba093c33e72593bcee3a8fd56eb5c444c97f29b53b2f066137eaad6f5 +size 22108 diff --git a/data/2025/2504_15xxx/2504.15777/images/94c0a15fc368a7728149a7406671dbf227cdd83e77da83147f99fd9a06986856.jpg b/data/2025/2504_15xxx/2504.15777/images/94c0a15fc368a7728149a7406671dbf227cdd83e77da83147f99fd9a06986856.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f464b4f6f999977d2d52175711ee6fd3b81ab169 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/94c0a15fc368a7728149a7406671dbf227cdd83e77da83147f99fd9a06986856.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1b24c0556228d17d5c7c62790d04615019d643747861f3301d4689d945f603d +size 44111 diff --git a/data/2025/2504_15xxx/2504.15777/images/9963c4c73368a08be9b4ba416a12dbe2b4ef4ec1b7263f7133125609784b4ce9.jpg b/data/2025/2504_15xxx/2504.15777/images/9963c4c73368a08be9b4ba416a12dbe2b4ef4ec1b7263f7133125609784b4ce9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..22e1afe9b4f24ad5aec53626d96ba905de291e69 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/9963c4c73368a08be9b4ba416a12dbe2b4ef4ec1b7263f7133125609784b4ce9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f66afa561119aeb24f29c2ba4fecf9b07cb3465dacb63c51063813c021049ab +size 35913 diff --git a/data/2025/2504_15xxx/2504.15777/images/9cea77735d3cf6c3ebdecad0073a818a5f4f1cca13aa643d18fce481c4540429.jpg 
b/data/2025/2504_15xxx/2504.15777/images/9cea77735d3cf6c3ebdecad0073a818a5f4f1cca13aa643d18fce481c4540429.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bf58c2f9e636d62da374bb1be7e12a897eeefe5e --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/9cea77735d3cf6c3ebdecad0073a818a5f4f1cca13aa643d18fce481c4540429.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64d13e28e52d849943e712b6bcbd84d5ff6622b69f1f4e9d580551546babd302 +size 40965 diff --git a/data/2025/2504_15xxx/2504.15777/images/a253375ed3cfaee7d217e3d0f9b8bbed5a508724cc1b432063d0e1b796a0d874.jpg b/data/2025/2504_15xxx/2504.15777/images/a253375ed3cfaee7d217e3d0f9b8bbed5a508724cc1b432063d0e1b796a0d874.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eb8d00381017982419946a937a8886ccccbd31f8 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/a253375ed3cfaee7d217e3d0f9b8bbed5a508724cc1b432063d0e1b796a0d874.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30d5417dba1e67c37a44b141aba05c98ef622378aea45f6a369185cc307a0d2d +size 40632 diff --git a/data/2025/2504_15xxx/2504.15777/images/a288133d71b7c9dc147e7ee5f78170f5e2d79b32a4c40cea023285708958f07c.jpg b/data/2025/2504_15xxx/2504.15777/images/a288133d71b7c9dc147e7ee5f78170f5e2d79b32a4c40cea023285708958f07c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30d4cb6a2cd82d6dd600830f5595939644e5fcd7 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/a288133d71b7c9dc147e7ee5f78170f5e2d79b32a4c40cea023285708958f07c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea0ccbf283e6984faba360ed23b13fe5b7b380e8f47f589f775ec73dd88d53d5 +size 42065 diff --git a/data/2025/2504_15xxx/2504.15777/images/a680baf93b2b90175aa50f3beb3cff201d9162fda811c1a9da2a5e11491e4d83.jpg b/data/2025/2504_15xxx/2504.15777/images/a680baf93b2b90175aa50f3beb3cff201d9162fda811c1a9da2a5e11491e4d83.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..6b845004fc482ad677cfd986102ac04bd9d99a0d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/a680baf93b2b90175aa50f3beb3cff201d9162fda811c1a9da2a5e11491e4d83.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0573f0df50260f02dfdddf459995957df7ac47de361698dbf5033f821f199eeb +size 42061 diff --git a/data/2025/2504_15xxx/2504.15777/images/a927aeddf98e9e4ddcd83a2c9de8d7f68786d344059baf05973d53f2a46f7f94.jpg b/data/2025/2504_15xxx/2504.15777/images/a927aeddf98e9e4ddcd83a2c9de8d7f68786d344059baf05973d53f2a46f7f94.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bc129e303b06abbbc22ca04ee31d92723e3fff43 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/a927aeddf98e9e4ddcd83a2c9de8d7f68786d344059baf05973d53f2a46f7f94.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef38150a85bb1466043c5ce14954b48dfd74038728e603793e1b4bc9f62393e6 +size 39854 diff --git a/data/2025/2504_15xxx/2504.15777/images/a9ee0ea5f785ef391938e14609d9a8376e65fd1348f8d7554bc57edaae2fac3a.jpg b/data/2025/2504_15xxx/2504.15777/images/a9ee0ea5f785ef391938e14609d9a8376e65fd1348f8d7554bc57edaae2fac3a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c063630f5b0cee1e425ada343b90bc77aaf07e72 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/a9ee0ea5f785ef391938e14609d9a8376e65fd1348f8d7554bc57edaae2fac3a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:486e2cb1e0129851ead509ef525ada736e3d35ecc0ce75c9328ab1da57e616ca +size 158486 diff --git a/data/2025/2504_15xxx/2504.15777/images/b191a5bf97faa7c45c1fee1dc686ecc9bfe74d6f70924690bd7ccbbb418b18d1.jpg b/data/2025/2504_15xxx/2504.15777/images/b191a5bf97faa7c45c1fee1dc686ecc9bfe74d6f70924690bd7ccbbb418b18d1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..47c9a071b29e44e95a90f34a6082dfc012e3f7a5 --- /dev/null +++ 
b/data/2025/2504_15xxx/2504.15777/images/b191a5bf97faa7c45c1fee1dc686ecc9bfe74d6f70924690bd7ccbbb418b18d1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e33908ebfc7a2caf452829d8b4749ffdbc0cf220b1c4928e37fc75305a0f687 +size 38838 diff --git a/data/2025/2504_15xxx/2504.15777/images/b4fc11af2a42cf76a840b1a3932aee83b2dd2f3789d5b131300587c7d4a21f96.jpg b/data/2025/2504_15xxx/2504.15777/images/b4fc11af2a42cf76a840b1a3932aee83b2dd2f3789d5b131300587c7d4a21f96.jpg new file mode 100644 index 0000000000000000000000000000000000000000..adc9c1d68011cea6fff94e87110b5d1c2283ac02 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/b4fc11af2a42cf76a840b1a3932aee83b2dd2f3789d5b131300587c7d4a21f96.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8056cef12f6275215be881c16f30b066d01df512397f71797f05f88bde7ebcf6 +size 102335 diff --git a/data/2025/2504_15xxx/2504.15777/images/bbe93058d5e119281dbe2a2a7453e28bb4b576cf301184abcc57d8ced53d3f62.jpg b/data/2025/2504_15xxx/2504.15777/images/bbe93058d5e119281dbe2a2a7453e28bb4b576cf301184abcc57d8ced53d3f62.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e7e6340e3efc4a4eb04e22af06aa18ef5371ab83 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/bbe93058d5e119281dbe2a2a7453e28bb4b576cf301184abcc57d8ced53d3f62.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8731807be6b1d35a60cf63ac88e206a22fb0d640215a0801290ee01af52fce5d +size 38016 diff --git a/data/2025/2504_15xxx/2504.15777/images/bc4af8c268796a19b9e7ca41828353aa9b004373143e6b9529eeeb8ed4804fab.jpg b/data/2025/2504_15xxx/2504.15777/images/bc4af8c268796a19b9e7ca41828353aa9b004373143e6b9529eeeb8ed4804fab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7b101942706492a18fdb937e3610ed3c14e43e8a --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/bc4af8c268796a19b9e7ca41828353aa9b004373143e6b9529eeeb8ed4804fab.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:9ea4f8988d3ad4a9f90aa6b390aac4d80dcaa3d7535aa3ce98ee272fb2846340 +size 41022 diff --git a/data/2025/2504_15xxx/2504.15777/images/bc9a0f1c786c904a3f69ed882f4cf22f1a6a30c05c7bf904d4badae2ee5de727.jpg b/data/2025/2504_15xxx/2504.15777/images/bc9a0f1c786c904a3f69ed882f4cf22f1a6a30c05c7bf904d4badae2ee5de727.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b4bb42d3ee55ccf174ac480ed659aa8d40736bd --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/bc9a0f1c786c904a3f69ed882f4cf22f1a6a30c05c7bf904d4badae2ee5de727.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bef5a652f250215e52a665adeeb643fa013799a6ffdabf86a53a7b1b68b7fad4 +size 42052 diff --git a/data/2025/2504_15xxx/2504.15777/images/bf53269452a3162272dd632626aa50af3f1678181b6f9d0ba4c95990a34126d1.jpg b/data/2025/2504_15xxx/2504.15777/images/bf53269452a3162272dd632626aa50af3f1678181b6f9d0ba4c95990a34126d1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..732486f3a3770f235b631f5c4c0febee8e5ce79e --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/bf53269452a3162272dd632626aa50af3f1678181b6f9d0ba4c95990a34126d1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fddc2a60dd6758c23f66b51bc8a9a965c707c2a29338fd25832b0b1d2976fff +size 13231 diff --git a/data/2025/2504_15xxx/2504.15777/images/bf53e4cf04bfe1cc8a3830d64c251087ca403806e1f7125338cfdeb5cdd2d04b.jpg b/data/2025/2504_15xxx/2504.15777/images/bf53e4cf04bfe1cc8a3830d64c251087ca403806e1f7125338cfdeb5cdd2d04b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5c464201a789207fcfbcfbbdd282ac3691fcb30f --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/bf53e4cf04bfe1cc8a3830d64c251087ca403806e1f7125338cfdeb5cdd2d04b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:827a27dde60818ec051ba8474adf9ebb143b3434c89ee9c58e3ba5ebe1123f68 +size 42284 diff --git 
a/data/2025/2504_15xxx/2504.15777/images/c58552ebaf2daf6eabfa675f030cd3a157abeb8ab14396bafc107c507f8547bd.jpg b/data/2025/2504_15xxx/2504.15777/images/c58552ebaf2daf6eabfa675f030cd3a157abeb8ab14396bafc107c507f8547bd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f1f28e82a3c709161ed35508faa7964a1e61d15f --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/c58552ebaf2daf6eabfa675f030cd3a157abeb8ab14396bafc107c507f8547bd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88eb7c74e36fde7418089555559a7327fbd43c92653fa077e73f38aefb371d3a +size 13961 diff --git a/data/2025/2504_15xxx/2504.15777/images/cb82a48a302ec32484b525d394a3919acf1c3e6f8775d6f31572654513eb4bf8.jpg b/data/2025/2504_15xxx/2504.15777/images/cb82a48a302ec32484b525d394a3919acf1c3e6f8775d6f31572654513eb4bf8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..edc873387c7965ddd798454c3187e865ac9a4eba --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/cb82a48a302ec32484b525d394a3919acf1c3e6f8775d6f31572654513eb4bf8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fd1e46c9160d77b981c1d34e31263e687e8ccfa985cbf16015ccc1fe0766e4e +size 42518 diff --git a/data/2025/2504_15xxx/2504.15777/images/d0a6b65304f02a110defa4ef71b15594342b223bbbf05dc73dd44d06fd2fb35c.jpg b/data/2025/2504_15xxx/2504.15777/images/d0a6b65304f02a110defa4ef71b15594342b223bbbf05dc73dd44d06fd2fb35c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..07a545b3145c90c32ae4f75e549b27522d3f3d03 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/d0a6b65304f02a110defa4ef71b15594342b223bbbf05dc73dd44d06fd2fb35c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2a326c173edfcad2916d75b469514c09c71212e2fa42f42153bb0fc8b794d10 +size 37766 diff --git a/data/2025/2504_15xxx/2504.15777/images/d353ef6fc3f55dfc1422f1740441c1355ae02f826e634eac7f3afe7a8106e2b5.jpg 
b/data/2025/2504_15xxx/2504.15777/images/d353ef6fc3f55dfc1422f1740441c1355ae02f826e634eac7f3afe7a8106e2b5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e36cfb3fcd33d7408b9e26a1bb20c2e77c43bed3 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/d353ef6fc3f55dfc1422f1740441c1355ae02f826e634eac7f3afe7a8106e2b5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:563c83e0efbc74c502947edc50c393aa9161ddce0109fa1488d4c0cd9f81273f +size 45101 diff --git a/data/2025/2504_15xxx/2504.15777/images/d40c018fc96b24189e82fe7ab40c407172146c7f87651959576d69ef64fe1e94.jpg b/data/2025/2504_15xxx/2504.15777/images/d40c018fc96b24189e82fe7ab40c407172146c7f87651959576d69ef64fe1e94.jpg new file mode 100644 index 0000000000000000000000000000000000000000..104d7d806a163652348e7d4b045720c7eeb327c8 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/d40c018fc96b24189e82fe7ab40c407172146c7f87651959576d69ef64fe1e94.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db3d6f2c901012225cf38c60c29944c9c9b763759066d9bd7aa2248da043a7d6 +size 40337 diff --git a/data/2025/2504_15xxx/2504.15777/images/d641c625095868eb4819266432c0c7a072fc31c2f491c3d79d392f1711d5b00e.jpg b/data/2025/2504_15xxx/2504.15777/images/d641c625095868eb4819266432c0c7a072fc31c2f491c3d79d392f1711d5b00e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f17a9a4c2f2d7083a6b215ebce9dfb393e41de5e --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/d641c625095868eb4819266432c0c7a072fc31c2f491c3d79d392f1711d5b00e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:871bee67ab416f270e95856dbac8091b86ffb8c6555769785811df2cc56cb8d9 +size 40769 diff --git a/data/2025/2504_15xxx/2504.15777/images/d67dbf5f5b71c11036cb87eea3eff99ae62340ff38a964f0b90e4e59d337bcbb.jpg b/data/2025/2504_15xxx/2504.15777/images/d67dbf5f5b71c11036cb87eea3eff99ae62340ff38a964f0b90e4e59d337bcbb.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..131f28f31f784e36c46385ed896616614b10c79a --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/d67dbf5f5b71c11036cb87eea3eff99ae62340ff38a964f0b90e4e59d337bcbb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:095eee75662f68cbacd4bc4d9a47842e1548db6d33e1e75efe8d5ec777ba8668 +size 95488 diff --git a/data/2025/2504_15xxx/2504.15777/images/d77248ceec0597e3173ad7abb2455e9c6b9bd88d21d80b0ff7244825d3df6bc0.jpg b/data/2025/2504_15xxx/2504.15777/images/d77248ceec0597e3173ad7abb2455e9c6b9bd88d21d80b0ff7244825d3df6bc0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6963ea85c6bd63f22e12e8e46d274daefdd18355 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/d77248ceec0597e3173ad7abb2455e9c6b9bd88d21d80b0ff7244825d3df6bc0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a7ae0bfc8a54be43b1ef30063fd0e39110ab337c6c11ffdf7a2c65bc24f6297 +size 44652 diff --git a/data/2025/2504_15xxx/2504.15777/images/da84d29ddbc030bb2732b675abae652384353c1ddbd7c6ca78bb6eac68830c8a.jpg b/data/2025/2504_15xxx/2504.15777/images/da84d29ddbc030bb2732b675abae652384353c1ddbd7c6ca78bb6eac68830c8a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ea8ca0777f07f66b76547c16afebc9fd70ca5bf8 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/da84d29ddbc030bb2732b675abae652384353c1ddbd7c6ca78bb6eac68830c8a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e72f96e80a13ad9fa3e533318f226a526674ce26546cad1d5a8ef136c0b83890 +size 40886 diff --git a/data/2025/2504_15xxx/2504.15777/images/decc081a621463503dc87bdda5aa04774f28a8003bed84c4134f7d368e1c0ad1.jpg b/data/2025/2504_15xxx/2504.15777/images/decc081a621463503dc87bdda5aa04774f28a8003bed84c4134f7d368e1c0ad1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..07bb46e4c3127a7f53441066fd2c4c8d61812775 --- /dev/null +++ 
b/data/2025/2504_15xxx/2504.15777/images/decc081a621463503dc87bdda5aa04774f28a8003bed84c4134f7d368e1c0ad1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f541bfd50eeeb3b8be332c4f042bfcca24e30974a7cf96b50ddc6281ce48128 +size 145346 diff --git a/data/2025/2504_15xxx/2504.15777/images/e10e05106fdbb6069713e797be132f798dd8dd77c486e0e23785c982fa11ca63.jpg b/data/2025/2504_15xxx/2504.15777/images/e10e05106fdbb6069713e797be132f798dd8dd77c486e0e23785c982fa11ca63.jpg new file mode 100644 index 0000000000000000000000000000000000000000..256c2d320ff6baa8ba3852ef1865b9fbf8af863a --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/e10e05106fdbb6069713e797be132f798dd8dd77c486e0e23785c982fa11ca63.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:825315fcf552b02dc666476309847a76ffeda5fc680bf09dd3b3053954f148c2 +size 21767 diff --git a/data/2025/2504_15xxx/2504.15777/images/e12f87e58a46f826adb346894e3dc14c83b821d5f0533d83abf938406e0ec4c7.jpg b/data/2025/2504_15xxx/2504.15777/images/e12f87e58a46f826adb346894e3dc14c83b821d5f0533d83abf938406e0ec4c7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..28e8eb6b3a480eefb116a98ef75a20ef4daf82bd --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/e12f87e58a46f826adb346894e3dc14c83b821d5f0533d83abf938406e0ec4c7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bcd194416a6de80225c1feb66e5d1a62036dc476c5cd5e413b6acf668a10a71b +size 41802 diff --git a/data/2025/2504_15xxx/2504.15777/images/e163250b6fb559cde051ed6f39a9dabe05f4228399aa036ce5a4958d8e4ae7d9.jpg b/data/2025/2504_15xxx/2504.15777/images/e163250b6fb559cde051ed6f39a9dabe05f4228399aa036ce5a4958d8e4ae7d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4944e5796f76e5a22ab71abd069f23ff2998bd42 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/e163250b6fb559cde051ed6f39a9dabe05f4228399aa036ce5a4958d8e4ae7d9.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:c411ed58fd17951c915f9e84f9092b915c0970e2e44b1c0d64850e28d9710aac +size 10176 diff --git a/data/2025/2504_15xxx/2504.15777/images/e227c2e22316cb4b9419f97f054e7fabe4740818619f91e7c8b4fe40c3152bac.jpg b/data/2025/2504_15xxx/2504.15777/images/e227c2e22316cb4b9419f97f054e7fabe4740818619f91e7c8b4fe40c3152bac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87db93cc146c9f4a4d5a4aa6624d158553c88073 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/e227c2e22316cb4b9419f97f054e7fabe4740818619f91e7c8b4fe40c3152bac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2bec823e071a078123a184e3b308e2e06a8adb9314c55bd196a0fbd351452050 +size 42667 diff --git a/data/2025/2504_15xxx/2504.15777/images/e26a69ce1c5d5ce240e9dc3cff8e42ad98280111c18653b9dbec50810ca60eca.jpg b/data/2025/2504_15xxx/2504.15777/images/e26a69ce1c5d5ce240e9dc3cff8e42ad98280111c18653b9dbec50810ca60eca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a82f58a4be612757db08a551a66cc01ca82334d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/e26a69ce1c5d5ce240e9dc3cff8e42ad98280111c18653b9dbec50810ca60eca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60982dce9afbd0209339c3ef3c6969a3293bb9021736c717af1b60c59c903eba +size 21655 diff --git a/data/2025/2504_15xxx/2504.15777/images/e293c0018a412234e72b8b60b87a9e3dca0699425d5b9a868e7d6995c7a461f3.jpg b/data/2025/2504_15xxx/2504.15777/images/e293c0018a412234e72b8b60b87a9e3dca0699425d5b9a868e7d6995c7a461f3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..49a267fe5e7da4186b94b52fc169a223e6621774 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/e293c0018a412234e72b8b60b87a9e3dca0699425d5b9a868e7d6995c7a461f3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d6bb32438aaaa502c85ae0b5cf4c3d26b9e1e18387ce040b8bdbad804f268b2 +size 38174 diff --git 
a/data/2025/2504_15xxx/2504.15777/images/e30c2021c32c397470ab042d61912768d1d764516c4830521a738697490ba23f.jpg b/data/2025/2504_15xxx/2504.15777/images/e30c2021c32c397470ab042d61912768d1d764516c4830521a738697490ba23f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6a8a899ddebde31f6ec7c4a3da61b659fa0e1996 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/e30c2021c32c397470ab042d61912768d1d764516c4830521a738697490ba23f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08eb20a8aabb1dc48abba075e1db38e39d23c88bd92c84e2ea52a12bce1f0016 +size 45962 diff --git a/data/2025/2504_15xxx/2504.15777/images/e7251e5046f2a5f6cb05b876b7fc39023bdfb9a1ab1a6d709a9ce612c7eaab5a.jpg b/data/2025/2504_15xxx/2504.15777/images/e7251e5046f2a5f6cb05b876b7fc39023bdfb9a1ab1a6d709a9ce612c7eaab5a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..806dc0e4a7d0a01da3be1272ce0078d3b7cd78e7 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/e7251e5046f2a5f6cb05b876b7fc39023bdfb9a1ab1a6d709a9ce612c7eaab5a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6a4fb3cbbb3003f066599633940dfc583da0fd1c138960a40db14af45bef509 +size 13666 diff --git a/data/2025/2504_15xxx/2504.15777/images/ed65f9fd40831f8b3283fbd88857fbde1efe7a1a747af3aad5020ea0ab07ecfb.jpg b/data/2025/2504_15xxx/2504.15777/images/ed65f9fd40831f8b3283fbd88857fbde1efe7a1a747af3aad5020ea0ab07ecfb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8e1db22fd4d579afe16fc601b575cd74b4e148f5 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/ed65f9fd40831f8b3283fbd88857fbde1efe7a1a747af3aad5020ea0ab07ecfb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0cb3a962f7c0df269932c589359e737861475fb245da74bf5747cb182cdea82e +size 23192 diff --git a/data/2025/2504_15xxx/2504.15777/images/f3bb7eda6f0c5f44a8d6892ff46ddcd98d6c1bdaa0dba782f01b360e1e83989e.jpg 
b/data/2025/2504_15xxx/2504.15777/images/f3bb7eda6f0c5f44a8d6892ff46ddcd98d6c1bdaa0dba782f01b360e1e83989e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e7d377929df6180ce2a76d2f5ad952d77999cc73 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/f3bb7eda6f0c5f44a8d6892ff46ddcd98d6c1bdaa0dba782f01b360e1e83989e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b27c8dac203e91300b2424b049d970421b4b2aa0346d87444b95c133bc723d40 +size 148082 diff --git a/data/2025/2504_15xxx/2504.15777/images/f4186b7a62fdd3d663ffed363a38e286da6614aad9564a13f40b27a02c1f824c.jpg b/data/2025/2504_15xxx/2504.15777/images/f4186b7a62fdd3d663ffed363a38e286da6614aad9564a13f40b27a02c1f824c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c5e96af85549becac81b4d411194bc476ccdbe0d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/f4186b7a62fdd3d663ffed363a38e286da6614aad9564a13f40b27a02c1f824c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf3863ba2c02814a37087ac58587efe791457e404fc27db1431cf796478d9f22 +size 36309 diff --git a/data/2025/2504_15xxx/2504.15777/images/f7e24a2670d10d97ac1743803388f0f914b52e3adf5a7d8bb766e74bbc93cbba.jpg b/data/2025/2504_15xxx/2504.15777/images/f7e24a2670d10d97ac1743803388f0f914b52e3adf5a7d8bb766e74bbc93cbba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3974ae7002e1f2353a95182fcb844bbe0fd1d96b --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/f7e24a2670d10d97ac1743803388f0f914b52e3adf5a7d8bb766e74bbc93cbba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d5a5622fd3564e413b47f13a0f92284a7302e6a8e94676c337b341f7b96eeb5 +size 75471 diff --git a/data/2025/2504_15xxx/2504.15777/images/f82684fd818799f06d8422102e17ce5a54eb80cd17d33223ef24703ede35a673.jpg b/data/2025/2504_15xxx/2504.15777/images/f82684fd818799f06d8422102e17ce5a54eb80cd17d33223ef24703ede35a673.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..79c52608c7b84c06b77575ed4687ce09014ba6dc --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/images/f82684fd818799f06d8422102e17ce5a54eb80cd17d33223ef24703ede35a673.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b3d50a3517fb879589c373703990c75c7228b92e1997642e995d9dde10a1feb +size 22424 diff --git a/data/2025/2504_15xxx/2504.15777/layout.json b/data/2025/2504_15xxx/2504.15777/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..25d05634eeda506393ced9147e5a263edb889dfe --- /dev/null +++ b/data/2025/2504_15xxx/2504.15777/layout.json @@ -0,0 +1,13789 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 115, + 84, + 474, + 108 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 84, + 474, + 108 + ], + "spans": [ + { + "bbox": [ + 115, + 84, + 474, + 108 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 118, + 466, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 118, + 466, + 147 + ], + "spans": [ + { + "bbox": [ + 121, + 118, + 466, + 147 + ], + "type": "text", + "content": "Shangshang Wang1, Julian Asilis1, Ömer Faruk Akgül1, Enes Burak Bilgin1, Ollie Liu1, and Willie Neiswanger1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 217, + 154, + 368, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 154, + 368, + 169 + ], + "spans": [ + { + "bbox": [ + 217, + 154, + 368, + 169 + ], + "type": "text", + "content": "1University of Southern California" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 182, + 542, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 182, + 542, + 379 + ], + "spans": [ + { + "bbox": [ + 67, + 182, + 542, + 379 + ], + "type": "text", + "content": "How cost-effectively can strong reasoning abilities be achieved in language 
models? Driven by this fundamental question, we present Tina, a family of tiny reasoning models achieved with high cost-efficiency. Notably, Tina demonstrates that substantial reasoning performance can be developed using only minimal resources, by applying parameter-efficient updates during reinforcement learning (RL), using low-rank adaptation (LoRA), to an already tiny 1.5B parameter base model. This minimalist approach produces models that achieve reasoning performance which is competitive with, and sometimes surpasses, SOTA RL reasoning models built upon the same base model. Crucially, this is achieved at a tiny fraction of the computational post-training cost employed by existing SOTA models. In fact, the best Tina model achieves a " + }, + { + "bbox": [ + 67, + 182, + 542, + 379 + ], + "type": "inline_equation", + "content": ">20\\%" + }, + { + "bbox": [ + 67, + 182, + 542, + 379 + ], + "type": "text", + "content": " reasoning performance increase and " + }, + { + "bbox": [ + 67, + 182, + 542, + 379 + ], + "type": "inline_equation", + "content": "43.33\\%" + }, + { + "bbox": [ + 67, + 182, + 542, + 379 + ], + "type": "text", + "content": " Pass@1 accuracy on AIME24, at only $9 USD post-training and evaluation cost (i.e., an estimated 260x cost reduction). Our work reveals the surprising effectiveness of efficient RL reasoning via LoRA. We validate this across multiple open-source reasoning datasets and various ablation settings starting with a single, fixed set of hyperparameters. Furthermore, we hypothesize that this effectiveness and efficiency stem from LoRA rapidly adapting the model to the structural format of reasoning rewarded by RL, while largely preserving the base model's underlying knowledge. In service of accessibility and open research, we fully open-source all code, training logs, and model weights & checkpoints." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 392, + 364, + 445 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 69, + 392, + 326, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 392, + 326, + 406 + ], + "spans": [ + { + "bbox": [ + 69, + 392, + 326, + 406 + ], + "type": "text", + "content": "Notion Blog: https://shangshangwang.notion.site/tina" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 406, + 358, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 406, + 358, + 419 + ], + "spans": [ + { + "bbox": [ + 69, + 406, + 358, + 419 + ], + "type": "text", + "content": "Code Repository: https://github.com/shangshang-wang/Tina" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 419, + 356, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 419, + 356, + 432 + ], + "spans": [ + { + "bbox": [ + 69, + 419, + 356, + 432 + ], + "type": "text", + "content": "Training Logs: https://wandb.ai/upup-ashton-wang-usc/Tina" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 432, + 364, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 432, + 364, + 445 + ], + "spans": [ + { + "bbox": [ + 69, + 432, + 364, + 445 + ], + "type": "text", + "content": "Model Weights & Checkpoints: https://huggingface.co/Tina-Yi" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 52, + 480, + 147, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 480, + 147, + 495 + ], + "spans": [ + { + "bbox": [ + 52, + 480, + 147, + 495 + ], + "type": "text", + "content": "1. 
Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 506, + 561, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 506, + 561, + 698 + ], + "spans": [ + { + "bbox": [ + 49, + 506, + 561, + 698 + ], + "type": "text", + "content": "Language models (LMs) demonstrate increasing proficiency across a variety of tasks, but achieving robust, multi-step reasoning remains a frontier challenge (Wang and Neiswanger, 2025, Xu et al., 2025). Notably, such reasoning abilities are crucial for applications demanding complex problem-solving, from scientific discovery to intricate planning. Enhancing complex reasoning via supervised fine-tuning (SFT) is a well-adopted technique, often utilizing a distillation process (Min et al., 2024, Huang et al., 2024) by which the model learns to mimic reasoning traces (e.g., step-by-step thinking) generated by more advanced models such as o1 (OpenAI, 2024). This approach, while effective, relies upon the quality and availability of such expert demonstrations, which can be costly to obtain. Furthermore, it can run the risk of instilling a shallow form of imitation in the learning model, rather than fostering dynamic exploration of reasoning paths. In contrast, reinforcement learning (RL) enables models to learn directly and flexibly from verifiable reward signals derived from curated data (DeepSeek-AI, 2025, Lambert et al., 2025). In doing so, RL can lead the model to explore a greater variety of logical paths and possibly discover more robust solutions. However, RL pipelines are often complex and notoriously resource-intensive, typically involving substantial compute. 
This raises a fundamental question anchoring our research:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 710, + 506, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 710, + 506, + 724 + ], + "spans": [ + { + "bbox": [ + 107, + 710, + 506, + 724 + ], + "type": "text", + "content": "How cost-effectively can one perform RL to efficiently instill reasoning abilities in LMs?" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 209, + 37, + 561 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 209, + 37, + 561 + ], + "spans": [ + { + "bbox": [ + 14, + 209, + 37, + 561 + ], + "type": "text", + "content": "arXiv:2504.15777v1 [cs.CL] 22 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 51, + 748, + 424, + 759 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 748, + 424, + 759 + ], + "spans": [ + { + "bbox": [ + 51, + 748, + 424, + 759 + ], + "type": "text", + "content": "Corresponding author(s): Shangshang Wang shangshangwang.github.io; Willie Neiswanger neiswang@usc.edu" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 57, + 86, + 181, + 199 + ], + "blocks": [ + { + "bbox": [ + 57, + 86, + 181, + 199 + ], + "lines": [ + { + "bbox": [ + 57, + 86, + 181, + 199 + ], + "spans": [ + { + "bbox": [ + 57, + 86, + 181, + 199 + ], + "type": "image", + "image_path": "c58552ebaf2daf6eabfa675f030cd3a157abeb8ab14396bafc107c507f8547bd.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 204, + 560, + 243 + ], + "lines": [ + { + "bbox": [ + 50, + 204, + 560, + 243 + ], + "spans": [ + { + "bbox": [ + 50, + 204, + 560, + 243 + ], + "type": "text", + "content": "Figure 1: Overall comparison between Tina and baseline models. 
The Tina model in the figure corresponds to the best checkpoint in Table 10. Reasoning performance denotes the average score across AIME24/25, AMC23, MATH500, GPQA, and Minerva, as described in Section 3. The calculation of each comparative metric is detailed in Appendix A." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 182, + 86, + 306, + 200 + ], + "blocks": [ + { + "bbox": [ + 182, + 86, + 306, + 200 + ], + "lines": [ + { + "bbox": [ + 182, + 86, + 306, + 200 + ], + "spans": [ + { + "bbox": [ + 182, + 86, + 306, + 200 + ], + "type": "image", + "image_path": "bf53269452a3162272dd632626aa50af3f1678181b6f9d0ba4c95990a34126d1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 307, + 86, + 430, + 200 + ], + "blocks": [ + { + "bbox": [ + 307, + 86, + 430, + 200 + ], + "lines": [ + { + "bbox": [ + 307, + 86, + 430, + 200 + ], + "spans": [ + { + "bbox": [ + 307, + 86, + 430, + 200 + ], + "type": "image", + "image_path": "e7251e5046f2a5f6cb05b876b7fc39023bdfb9a1ab1a6d709a9ce612c7eaab5a.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 432, + 85, + 553, + 200 + ], + "blocks": [ + { + "bbox": [ + 432, + 85, + 553, + 200 + ], + "lines": [ + { + "bbox": [ + 432, + 85, + 553, + 200 + ], + "spans": [ + { + "bbox": [ + 432, + 85, + 553, + 200 + ], + "type": "image", + "image_path": "6c6d44847f89dd7f528dfee2e033953d575eac2bf14e954d1c4d9a807378e5d6.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 254, + 561, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 254, + 561, + 472 + ], + "spans": [ + { + "bbox": [ + 52, + 254, + 561, + 472 + ], + "type": "text", + "content": "Our pursuit of this question necessitates a 
deliberate move towards minimalism. Rather than utilizing models with tens of billions of parameters (such as Qwen-7B/32B, QwQ-32B-preview, and their variants (Min et al., 2024, NovaSky Team, 2025, Zeng et al., 2025, Muennighoff et al., 2025, Cui et al., 2025, Lyu et al., 2025, OpenThoughts Team, 2025, Hu et al., 2025)), we instead direct our attention to tiny models. In particular, we use the 1.5B parameter model, DeepSeek-R1-Distill-Qwen-1.5B (DeepSeek-AI, 2025). Our choice of this base model aligns with common practices in recent research (RUCAIBox STILL Team, 2025, Luo et al., 2025, Dang and Ngo, 2025): we begin with a foundation that, owing to its specific lineage (DeepSeek/Qwen) and distillation process, likely possesses stronger initial reasoning aptitude compared to a generic pre-trained model of equivalent size. This strategic starting point allows us to more-rigorously evaluate the incremental reasoning enhancements imparted by RL, thereby isolating and measuring the effectiveness of the technique itself over a competent baseline. More importantly, selecting such an architecture dramatically lowers the computational and financial threshold for experimentation. Complementing the choice of a compact base model, we further amplify efficiency during the RL phase and integrate parameter-efficient post-training by employing low-rank adaptation (LoRA) (Hu et al., 2021). Notably, LoRA enables the modification of a model's behavior by training only an exceptionally small number of new parameters. This dovetails with our central motivation: achieving reasoning capabilities through the most economical means possible." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 478, + 560, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 478, + 560, + 519 + ], + "spans": [ + { + "bbox": [ + 50, + 478, + 560, + 519 + ], + "type": "text", + "content": "Integrating the previous two components—a “tiny” model architecture and a “tiny” post-training via LoRA-based RL—we release the Tina (Tiny Reasoning Models via LoRA) family of models, which attain substantial reasoning performance at strikingly low cost. In total, we summarize our contributions as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 59, + 525, + 559, + 708 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 59, + 525, + 558, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 525, + 558, + 582 + ], + "spans": [ + { + "bbox": [ + 59, + 525, + 558, + 582 + ], + "type": "text", + "content": "- Surprising Effectiveness of Efficient RL Reasoning. We show that our Tina models achieve performance competitive with, and in some cases even superior to, SOTA baseline models built on the same base model with full-parameter training, as shown in Figure 1 and in more detail in Table 3. In particular, the best Tina model achieves a " + }, + { + "bbox": [ + 59, + 525, + 558, + 582 + ], + "type": "inline_equation", + "content": ">20\\%" + }, + { + "bbox": [ + 59, + 525, + 558, + 582 + ], + "type": "text", + "content": " performance increase and " + }, + { + "bbox": [ + 59, + 525, + 558, + 582 + ], + "type": "inline_equation", + "content": "43.33\\%" + }, + { + "bbox": [ + 59, + 525, + 558, + 582 + ], + "type": "text", + "content": " Pass@1 accuracy on AIME24." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 59, + 586, + 559, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 586, + 559, + 708 + ], + "spans": [ + { + "bbox": [ + 59, + 586, + 559, + 708 + ], + "type": "text", + "content": "- Rapid Reasoning Format Adaptation Hypothesis. Based on our observations in post-training Tina, we hypothesize that LoRA's effectiveness and efficiency stem from rapidly adapting the reasoning format under RL while preserving base model knowledge—a likely more compute-efficient process than the deep knowledge integration of full-parameter training. Partial support comes from studies showing tiny LMs can reason effectively (Hugging Face, 2025, DeepSeek-AI, 2025), while large LMs can store broader world knowledge (Allen-Zhu and Li, 2025). This distinction suggests reasoning capabilities can be significantly enhanced by focusing on adapting the output format itself, consistent with our hypothesis about LoRA. To test this, we exclusively train LoRA parameters in RL settings, focusing on leveraging this format adaptation mechanism." 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 552, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 552, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 552, + 750, + 558, + 759 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 63, + 83, + 547, + 273 + ], + "blocks": [ + { + "bbox": [ + 63, + 83, + 547, + 273 + ], + "lines": [ + { + "bbox": [ + 63, + 83, + 547, + 273 + ], + "spans": [ + { + "bbox": [ + 63, + 83, + 547, + 273 + ], + "type": "image", + "image_path": "f7e24a2670d10d97ac1743803388f0f914b52e3adf5a7d8bb766e74bbc93cbba.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 285, + 560, + 312 + ], + "lines": [ + { + "bbox": [ + 50, + 285, + 560, + 312 + ], + "spans": [ + { + "bbox": [ + 50, + 285, + 560, + 312 + ], + "type": "text", + "content": "Figure 2: Release timeline of open-source models that aim to replicate the performance of advanced reasoning models like o1(-preview) (OpenAI, 2024) and R1 (DeepSeek-AI, 2025), which we refer to as open-source reasoning replicas." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 59, + 331, + 561, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 331, + 561, + 413 + ], + "spans": [ + { + "bbox": [ + 59, + 331, + 561, + 413 + ], + "type": "text", + "content": "- Democratizing RL Reasoning. 
We provide a reproducible and highly cost-effective approach, enabling wider participation in the exploration of RL techniques without requiring extensive computational resources. Notably, the cost of reproducing the best Tina checkpoint stands at only " + }, + { + "bbox": [ + 59, + 331, + 561, + 413 + ], + "type": "inline_equation", + "content": "9, and of reproducing all our experiments and everything presented in this paper from scratch at" + }, + { + "bbox": [ + 59, + 331, + 561, + 413 + ], + "type": "text", + "content": "526. Furthermore, in line with our goal of promoting accessible research, we release all code, training logs, evaluation scripts, and all Tina checkpoints." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 431, + 153, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 431, + 153, + 445 + ], + "spans": [ + { + "bbox": [ + 51, + 431, + 153, + 445 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 459, + 244, + 473 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 459, + 244, + 473 + ], + "spans": [ + { + "bbox": [ + 51, + 459, + 244, + 473 + ], + "type": "text", + "content": "2.1. Open-Source Reasoning Replicas" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 480, + 561, + 699 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 480, + 561, + 699 + ], + "spans": [ + { + "bbox": [ + 50, + 480, + 561, + 699 + ], + "type": "text", + "content": "As shown in Figure 2, following the release of o1-preview (OpenAI, 2024), a number of open-source models have emerged to replicate or exceed its reasoning capabilities. STILL (Min et al., 2024) introduced a minimal yet high-quality training recipe designed to elicit reasoning with modest compute, demonstrating that imitation learning from curated traces remains competitive. 
Sky-T1 (NovaSky Team, 2025) further explored scaling using open instruction-tuned checkpoints, while SimpleRL (Zeng et al., 2025) highlighted the potential of lightweight RL without requiring large-scale reward models. PRIME (Cui et al., 2025) and DeepScaleR (Luo et al., 2025) introduced process supervision and scaling experiments to isolate how reasoning quality evolves with model size and context length. s1 (Muennighoff et al., 2025) showed that even strong base models such as Qwen2.5-32B-Instruct benefit from fine-tuning on only 1k high-quality and long chain-of-thought data, which is curated to elicit reasoning capabilities. L1 (Aggarwal and Welleck, 2025) combined prompt engineering with data curation for RL, resulting in models that can efficiently and adaptively control their response length. Meanwhile, OREAL (Lyu et al., 2025) and OpenThinker (OpenThoughts Team, 2025) investigated self-correction and latent structure emergence through unsupervised and hybrid paradigms. The release of Open Reasoner Zero (Hu et al., 2025) and Open-RS (Dang and Ngo, 2025) further emphasized efficient RL-based strategies for reasoning with small models, completing a landscape of public alternatives for interpretability and reproducibility." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 552, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 552, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 552, + 750, + 558, + 759 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 85, + 215, + 99 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 85, + 215, + 99 + ], + "spans": [ + { + "bbox": [ + 51, + 85, + 215, + 99 + ], + "type": "text", + "content": "2.2. RL with Verifiable Rewards" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 107, + 561, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 107, + 561, + 285 + ], + "spans": [ + { + "bbox": [ + 50, + 107, + 561, + 285 + ], + "type": "text", + "content": "Reasoning tasks are well-suited to RL paradigms, as the correctness or quality of the final output often provides verifiable reward signals (e.g., the validity of a logical deduction). Such signal can effectively guide the model towards learning more robust reasoning strategies. Consequently, various RL approaches have been explored within this domain. Certain methods introduce auxiliary reward models or critics to assess reasoning quality, such as ReFT (Luong et al., 2024) and REFINER (Paul et al., 2024). Other techniques employ explicit rule-based verification for self-correction (Wu et al., 2024). 
Some leverage self-play dynamics and exploration, such as mutual reasoning (Qi et al., 2024), or utilize inference-aware fine-tuning that optimizes performance under different sampling strategies (Chow et al., 2024). Notably, Group Relative Policy Optimization (GRPO) has been proposed as a variant of Proximal Policy Optimization (PPO) which removes the need for a separate value network by using a group-based baseline for advantage estimation, improving training efficiency and leading to better reward alignment (Shao et al., 2024), as demonstrated by DeepSeek-R1 (DeepSeek-AI, 2025). Subsequently, Dr.GRPO (Liu et al., 2025) introduced a subtle modification of GRPO addressing its bias to produce long responses. For completeness, we provide the standard formulation of GRPO in Appendix B." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 304, + 188, + 317 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 304, + 188, + 317 + ], + "spans": [ + { + "bbox": [ + 51, + 304, + 188, + 317 + ], + "type": "text", + "content": "2.3. Low-Rank Adaptation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 326, + 562, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 326, + 562, + 449 + ], + "spans": [ + { + "bbox": [ + 50, + 326, + 562, + 449 + ], + "type": "text", + "content": "While most existing open models that enable reasoning rely on the more expensive full-parameter training (Min et al., 2024, NovaSky Team, 2025, Zeng et al., 2025, Muennighoff et al., 2025, Aggarwal and Welleck, 2025, Cui et al., 2025, Luo et al., 2025, Lyu et al., 2025, OpenThoughts Team, 2025, Hu et al., 2025, Dang and Ngo, 2025), we investigate the use of LoRA for parameter-efficient post-training of reasoning models (Hu et al., 2021). Our goal is to assess whether updating only a small fraction of parameters can still yield strong reasoning capabilities (Han et al., 2024). 
In addition to its computational efficiency, LoRA provides modularity: by training only a low-rank decomposition of the parameter updates, it becomes possible to toggle reasoning behavior without maintaining multiple full model copies. For completeness, we provide the standard formulation of LoRA in Appendix B." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 469, + 304, + 484 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 469, + 304, + 484 + ], + "spans": [ + { + "bbox": [ + 51, + 469, + 304, + 484 + ], + "type": "text", + "content": "3. Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 495, + 561, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 495, + 561, + 592 + ], + "spans": [ + { + "bbox": [ + 50, + 495, + 561, + 592 + ], + "type": "text", + "content": "Tina is our family of models created by post-training the DeepSeek-R1-Distill-Qwen-1.5B base model using LoRA during RL (employing a GRPO-style algorithm). The \"Tiny\" designation encapsulates a deliberate focus on minimalism and efficiency across the entire framework. This encompasses not only the tiny base model architecture and the tiny parameter updates enabled by LoRA, but also extends to a tiny overall resource footprint. This minimized footprint is achieved through an efficient training pipeline leveraging accessible open-source datasets and codebase (detailed in Section 3.1), and requires only minimal hardware and budget resources (described in Section 3.2)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 610, + 278, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 610, + 278, + 624 + ], + "spans": [ + { + "bbox": [ + 51, + 610, + 278, + 624 + ], + "type": "text", + "content": "3.1. 
Training Pipeline: Baselines & Datasets" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 633, + 561, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 633, + 561, + 674 + ], + "spans": [ + { + "bbox": [ + 50, + 633, + 561, + 674 + ], + "type": "text", + "content": "To facilitate meaningful comparisons and enable precise ablations, we post-train our Tina models via RL using the datasets and setups from publicly available reasoning models. All Tina and baseline models adopt DeepSeek-R1-Distill-Qwen-1.5B as their base model checkpoint with default open-source weights." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 59, + 683, + 561, + 725 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 683, + 561, + 725 + ], + "spans": [ + { + "bbox": [ + 59, + 683, + 561, + 725 + ], + "type": "text", + "content": "- STILL-3-1.5B-preview (RUCAIBox STILL Team, 2025) is a slow-thinking reasoning model developed through iterative RL on a curated dataset of " + }, + { + "bbox": [ + 59, + 683, + 561, + 725 + ], + "type": "inline_equation", + "content": "33\\mathrm{k}" + }, + { + "bbox": [ + 59, + 683, + 561, + 725 + ], + "type": "text", + "content": " reasoning traces. 
The data originates from mathematics competitions and includes problems from MATH (Hendrycks et al., 2021, Lightman et al.," + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 552, + 751, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 552, + 751, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 552, + 751, + 558, + 759 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 85, + 560, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 85, + 560, + 113 + ], + "spans": [ + { + "bbox": [ + 69, + 85, + 560, + 113 + ], + "type": "text", + "content": "2023), NuminaMathCoT (LI et al., 2024), and AIME (1983-2023) (Art of Problem Solving, 2024). Tina-STILL-3-1.5B-preview uses the same dataset and reward pipeline." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 59, + 118, + 561, + 260 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 59, + 118, + 560, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 118, + 560, + 186 + ], + "spans": [ + { + "bbox": [ + 59, + 118, + 560, + 186 + ], + "type": "text", + "content": "- DeepScaleR-1.5B-Preview (Luo et al., 2025) focuses on long-context mathematical reasoning via RL, and is trained over approximately 40k problem-answer pairs drawn from the AIME (Art of Problem Solving, 2024), AMC (Art of Problem Solving, 2023), OMNI-MATH (Gao et al., 2024a), and STILL (RUCAIBox STILL Team, 2025) datasets. 
Tina-DeepScaleR-1.5B-Preview uses this dataset and mirrors the reward design." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 59, + 191, + 561, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 191, + 561, + 260 + ], + "spans": [ + { + "bbox": [ + 59, + 191, + 561, + 260 + ], + "type": "text", + "content": "- Open-RS1/2/3 (Dang and Ngo, 2025) are three models from the Open-RS project exploring reasoning performance in 1.5B models trained via RL. All Open-RS models are trained on small, high-quality datasets further curated from the s1 (Muennighoff et al., 2025) (i.e., Open-S1) and DeepScaleR (Luo et al., 2025) (i.e., Open-DeepScaleR) datasets. The Tina models (Tina-Open-RS1/2/3) replicate these setups, using identical data splits and reward scaffolding." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 50, + 279, + 282, + 293 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 279, + 282, + 293 + ], + "spans": [ + { + "bbox": [ + 50, + 279, + 282, + 293 + ], + "type": "text", + "content": "3.2. Training Setup: Infrastructure & Budget" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 303, + 561, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 303, + 561, + 387 + ], + "spans": [ + { + "bbox": [ + 50, + 303, + 561, + 387 + ], + "type": "text", + "content": "Training Codebase. Our implementation builds upon OpenR1, a fully open reproduction of DeepSeek-R1 (DeepSeek-AI, 2025) which combines the Accelerate (Gugger et al., 2022) and Trl (von Werra et al., 2020) libraries and the DeepSpeed ZeRO optimization (Rajbhandari et al., 2019). It aims to transparently replicate and extend RL methods used for improving reasoning in language models, particularly focusing on aligning model behavior with reasoning-oriented objectives via verifiable reward signals. Our methodology inherits its scaffolding, training utilities, and reward interfaces." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 392, + 568, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 392, + 568, + 502 + ], + "spans": [ + { + "bbox": [ + 50, + 392, + 568, + 502 + ], + "type": "text", + "content": "Training Hyperparameters. We initiated parameter selection by replicating key parameters from OpenR1 (Hugging Face, 2025) and OpenRS (Dang and Ngo, 2025). For all experiments presented in this paper, we deliberately adopted the default or recommended hyperparameter configurations provided in their works. These settings were kept largely fixed across different runs (Table 5). For the main Tina results (Section 4.2), only reward function parameters were adjusted per task, and for ablation studies (Section 4.3), only the specific factor under investigation (e.g., learning rate, LoRA rank/alpha, RL algorithm) was varied (Table 6). This approach intentionally circumvents costly hyperparameter search procedures for our specific setup, ensuring negligible tuning overhead and focusing on the efficacy of the core LoRA-based RL methodology." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 507, + 561, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 507, + 561, + 616 + ], + "spans": [ + { + "bbox": [ + 50, + 507, + 561, + 616 + ], + "type": "text", + "content": "Training Hardware. A key element of our low-cost approach was minimizing the hardware footprint. While distributed RL training algorithms like GRPO often benefit from using three or more GPUs (e.g., dedicating one GPU to an inference engine such as vLLM for faster sample generation), we deliberately targeted a minimal setup using only two NVIDIA L40S GPUs. To enable this, we co-located the RL training process and the vLLM on the same two GPUs by constraining vLLM's GPU memory usage. The training itself utilized data parallelism across both GPUs. 
While running inference and training concurrently on two GPUs might result in a longer wall-clock training time compared to a setup with dedicated inference GPUs, it significantly reduces the hardware requirement." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 622, + 560, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 622, + 560, + 664 + ], + "spans": [ + { + "bbox": [ + 50, + 622, + 560, + 664 + ], + "type": "text", + "content": "Training Budget. The NVIDIA L40S GPUs we use are accessible via commercial cloud platforms at an approximate rate of \\(1 USD per GPU hour, including 300 GB storage, based on pricing observed at the time of writing (Cudo Compute). The RL training process for our LoRA models proved highly efficient, with a" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 235, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 235, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 62, + 671, + 268, + 685 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 671, + 268, + 685 + ], + "spans": [ + { + "bbox": [ + 62, + 671, + 268, + 685 + ], + "type": "text", + "content": "1https://github.com/huggingface/open-r1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 685, + 560, + 721 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 685, + 560, + 721 + ], + "spans": [ + { + "bbox": [ + 50, + 685, + 560, + 721 + ], + "type": "text", + "content": "2Occasionally, NVIDIA RTX 6000 Ada GPUs were used instead, which is reflected in the system configuration metadata on Weights & Biases. From our practical experience, these two GPU types are similar in terms of cost and computational performance. 
For consistency, we report costs and compute metrics based on the L40S." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 552, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 552, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 552, + 750, + 558, + 759 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 66, + 83, + 545, + 363 + ], + "blocks": [ + { + "bbox": [ + 66, + 83, + 545, + 363 + ], + "lines": [ + { + "bbox": [ + 66, + 83, + 545, + 363 + ], + "spans": [ + { + "bbox": [ + 66, + 83, + 545, + 363 + ], + "type": "table", + "html": "
EXPERIMENTAL TASKTRAINING COST EST.EVALUATION COST EST.TOTAL COST EST.
Baseline: Model Re-Evaluation-$6$6
Main: Tina-STILL-3-1.5B-preview$59$7$66
Main: Tina-DeepScaleR-1.5B-Preview$84$10$94
Main: Tina-Open-RS1$40$11$51
Main: Tina-Open-RS2$15$17$32
Main: Tina-Open-RS3$15$17$32
Ablation: OpenThoughts Dataset$84$10$94
Ablation: OpenR1 Dataset$59$7$66
Ablation: LIMR Dataset$4$4$8
Ablation: DrGRPO Algorithm$15$17$32
Ablation: Learning Rate$7$8$15
Ablation: LoRA Rank/Alpha$14$16$30
Total: All Tasks$396$130$526
Total: Main Tasks$213$62$275
Total: Best Ckpt. in Each Main Task$80$5$85
Total: All Ckpt. in Best-Performance Task$14$17$31
Total: Best Ckpt. in Best-Performance Task$8$1$9
", + "image_path": "a9ee0ea5f785ef391938e14609d9a8376e65fd1348f8d7554bc57edaae2fac3a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 370, + 560, + 420 + ], + "lines": [ + { + "bbox": [ + 50, + 370, + 560, + 420 + ], + "spans": [ + { + "bbox": [ + 50, + 370, + 560, + 420 + ], + "type": "text", + "content": "Table 1: Computational cost breakdown. Costs for all experimental tasks in this paper, measured in USD. The row \"Best Ckpt. in Each Main Task\" denotes the cost of reproducing the best checkpoint in each of Table 7, 8, 9, 10, 11. The row \"All Ckpt. in Best-Performance Task\" denotes the cost of reproducing all checkpoints in Table 10. \"Best Ckpt. in Best-Performance Task\" denotes the cost of reproducing the best checkpoint in Table 10, i.e., the checkpoint at step 450." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 50, + 431, + 561, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 431, + 561, + 528 + ], + "spans": [ + { + "bbox": [ + 50, + 431, + 561, + 528 + ], + "type": "text", + "content": "single RL step typically completing within one minute on this hardware. Evaluating a model checkpoint across our entire suite of six reasoning benchmarks required approximately 1 L40S GPU hours on average. To ensure cost control, we initially established a conservative maximum budget of \\(100 USD for each complete experimental run, encompassing all stages from training to evaluation and miscellaneous tasks. As detailed in Table 1, our actual expenditures were significantly below this ceiling. Our calculation is based on the full Tina model evaluation performance in Appendix D. We believe this low cost makes our setup an accessible testbed for the research community." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 542, + 425, + 559 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 542, + 425, + 559 + ], + "spans": [ + { + "bbox": [ + 50, + 542, + 425, + 559 + ], + "type": "text", + "content": "4. Surprising Effectiveness of Efficient RL Reasoning via LoRA" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 570, + 338, + 585 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 570, + 338, + 585 + ], + "spans": [ + { + "bbox": [ + 50, + 570, + 338, + 585 + ], + "type": "text", + "content": "4.1. Experiments Stage I: Baseline Model Re-Evaluation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 592, + 561, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 592, + 561, + 675 + ], + "spans": [ + { + "bbox": [ + 50, + 592, + 561, + 675 + ], + "type": "text", + "content": "Before presenting Tina's performance, it is crucial to establish fair and reliable comparisons against existing SOTA reasoning models. We note that performance scores reported in the literature for relevant models often stem from evaluations using disparate frameworks (e.g., verl (Sheng et al., 2025), lighteval (Fourrier et al., 2023), lm-eval-harness (Gao et al., 2024b)) and inconsistent inference settings (such as different generation hyperparameters or varying numbers of GPUs). These variations can significantly influence reported metrics, creating potential inconsistencies and hindering reliable comparisons between models." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 680, + 560, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 680, + 560, + 723 + ], + "spans": [ + { + "bbox": [ + 50, + 680, + 560, + 723 + ], + "type": "text", + "content": "To mitigate these confounding factors, we performed a comprehensive re-evaluation of key baseline models using a single, consistent methodology throughout this paper. 
All baseline evaluations reported herein utilize the lighteval framework integrated with the vLLM (Kwon et al., 2023) inference engine for efficient" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 552, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 552, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 552, + 750, + 558, + 759 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 79, + 83, + 532, + 193 + ], + "blocks": [ + { + "bbox": [ + 79, + 83, + 532, + 193 + ], + "lines": [ + { + "bbox": [ + 79, + 83, + 532, + 193 + ], + "spans": [ + { + "bbox": [ + 79, + 83, + 532, + 193 + ], + "type": "table", + "html": "
BASELINE MODELAIME24AIME25AMC23MATH500GPQAMINervaAvg.
DeepSeek-R1-Distilled-Qwen-1.5B23.3316.6762.5082.6031.8230.1541.18
STILL-3-1.5B-preview26.6726.6767.5086.4034.3427.5744.86
DeepScaleR-1.5B-/Preview36.6726.6777.5087.8031.8231.9948.74
Open-RS126.6720.0072.5083.6035.3528.6844.47
Open-RS226.6713.3362.5085.4034.8526.8441.60
Open-RS343.3320.0067.5083.0033.8428.6846.06
", + "image_path": "56c0a1d34fc3c6892dc823d2eada4025b45a291b64ab9e854d5acd3cf67be71c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 201, + 517, + 213 + ], + "lines": [ + { + "bbox": [ + 50, + 201, + 517, + 213 + ], + "spans": [ + { + "bbox": [ + 50, + 201, + 517, + 213 + ], + "type": "text", + "content": "Table 2: Baseline model re-evaluation. Performance evaluation of baseline models on six reasoning tasks." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 50, + 226, + 559, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 226, + 559, + 294 + ], + "spans": [ + { + "bbox": [ + 50, + 226, + 559, + 294 + ], + "type": "text", + "content": "generation. For comparability with prior work such as OpenR1, we maintained a fixed hardware configuration (two L40S GPUs) and applied a standardized set of vLLM inference parameters across all evaluated baseline models. All scores are zero-shot pass@1 performance. The exact command structure employed for these evaluations is provided in Appendix C.2 for transparency and reproducibility. The results stemming from this consistent re-evaluation protocol are presented in Table 2." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 300, + 559, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 300, + 559, + 328 + ], + "spans": [ + { + "bbox": [ + 50, + 300, + 559, + 328 + ], + "type": "text", + "content": "Particularly, we evaluate the reasoning capabilities of our Tina models and the baselines across a diverse suite of six challenging benchmarks, primarily focused on mathematical and scientific reasoning:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 59, + 334, + 559, + 547 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 59, + 334, + 559, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 334, + 559, + 376 + ], + "spans": [ + { + "bbox": [ + 59, + 334, + 559, + 376 + ], + "type": "text", + "content": "- AIME24/25 (Art of Problem Solving, 2024) contains 30 high-school-level math problems in algebra, geometry, number theory, and combinatorics from the 2024/2025 American Invitational Mathematics Examination. Each problem demands precise multi-step reasoning." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 59, + 380, + 559, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 380, + 559, + 409 + ], + "spans": [ + { + "bbox": [ + 59, + 380, + 559, + 409 + ], + "type": "text", + "content": "- AMC23 (Art of Problem Solving, 2023) includes 40 problems from the 2023 American Mathematics Competition, offering a mix of logic and symbolic manipulation tasks." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 59, + 413, + 559, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 413, + 559, + 453 + ], + "spans": [ + { + "bbox": [ + 59, + 413, + 559, + 453 + ], + "type": "text", + "content": "- MATH500 (Hendrycks et al., 2021, Lightman et al., 2023) is a benchmark comprising 500 competition mathematics problems derived from various sources, covering different difficulty levels and often necessitating multi-step derivation and calculation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 59, + 460, + 559, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 460, + 559, + 487 + ], + "spans": [ + { + "bbox": [ + 59, + 460, + 559, + 487 + ], + "type": "text", + "content": "- GPQA Diamond (Rein et al., 2024), hereafter referred to as GPQA, consists of 198 PhD-level science questions across biology, chemistry, and physics. Each question is multiple-choice with subtle distractors." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 59, + 492, + 559, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 492, + 559, + 547 + ], + "spans": [ + { + "bbox": [ + 59, + 492, + 559, + 547 + ], + "type": "text", + "content": "- Minerva (Lewkowycz et al., 2022) includes 272 quantitative reasoning problems generally at the undergraduate level. The questions span multiple STEM fields, including physics, biology, chemistry, and economics, often requiring mathematical modeling or calculation steps. Includes tasks such as calculating enzyme kinetics from reaction data." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 50, + 562, + 304, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 562, + 304, + 574 + ], + "spans": [ + { + "bbox": [ + 50, + 562, + 304, + 574 + ], + "type": "text", + "content": "4.2. 
Experiments Stage II: Tina Model Evaluation" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 583, + 559, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 583, + 559, + 652 + ], + "spans": [ + { + "bbox": [ + 50, + 583, + 559, + 652 + ], + "type": "text", + "content": "We now present the core evaluation results for our Tina models. These experiments assess the reasoning capabilities attained by post-training the DeepSeek-R1-Distill-Qwen-1.5B with minimal parameter updates via LoRA-based RL. The results presented in Table 3 demonstrate that significant reasoning performance can be achieved efficiently, yielding models that are competitive with, or outperform, relevant baselines despite the inherent resource constraints of using parameter-efficient tuning.3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 50, + 658, + 559, + 672 + ], + "lines": [ + { + "bbox": [ + 50, + 658, + 559, + 672 + ], + "spans": [ + { + "bbox": [ + 50, + 658, + 559, + 672 + ], + "type": "text", + "content": "Table 3 summarizes the performance of five distinct Tina models across a suite of six reasoning tasks:" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 680, + 559, + 715 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 680, + 559, + 715 + ], + "spans": [ + { + "bbox": [ + 50, + 680, + 559, + 715 + ], + "type": "text", + "content": "3Tables 3 and 4 adopt a consistent naming pattern where \"Tina-X\" denotes our model is the LoRA counterpart of a baseline model X or is trained on a dataset X (possibly followed with an extra ablation setup). 
This can reflect the model origin and serve as a direct reference to the public checkpoints for reproducibility." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 552, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 552, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 552, + 750, + 558, + 759 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 52, + 83, + 558, + 162 + ], + "blocks": [ + { + "bbox": [ + 52, + 83, + 558, + 162 + ], + "lines": [ + { + "bbox": [ + 52, + 83, + 558, + 162 + ], + "spans": [ + { + "bbox": [ + 52, + 83, + 558, + 162 + ], + "type": "table", + "html": "
TINA MODELSTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.BASELINE
Tina-STILL-3-1.5B-preview53%36.6730.0077.5084.6033.3326.8448.1644.86
Tina-DeepScaleR-1.5B-/Preview19%43.3326.6767.5086.2037.8828.6848.3848.74
Tina-Open-RS134%43.3320.0080.0084.0035.3528.6848.5644.47
Tina-Open-RS251%43.3326.6777.5087.0036.3632.7250.6041.60
Tina-Open-RS357%36.6723.3382.5085.2037.3731.6249.4546.06
", + "image_path": "4e7e960fb8433289bbb35bd1a4c84458c12331f5c0c13f4f336813af762fbaef.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 170, + 559, + 217 + ], + "lines": [ + { + "bbox": [ + 52, + 170, + 559, + 217 + ], + "spans": [ + { + "bbox": [ + 52, + 170, + 559, + 217 + ], + "type": "text", + "content": "Table 3: Tina model evaluation. Performance comparison between Tina models and corresponding full-parameter-trained SOTA models on six reasoning tasks. The value in the Steps column indicates the training steps of the best model checkpoint within one epoch, the full model checkpoint evaluation is shown in Appendix D. The Baseline column represents the average score achieved by baseline model with full-parameter RL in Table 2." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 52, + 239, + 559, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 239, + 559, + 388 + ], + "spans": [ + { + "bbox": [ + 52, + 239, + 559, + 388 + ], + "type": "text", + "content": "AIME24/25, AMC23, MATH500, GPQA, and Minerva. For each Tina model, we report the extent of training completed (as a percentage of a predefined training stpes within 1 epoch) and the percentage scores achieved on each task. The results compellingly demonstrate the efficacy of our economical LoRA-based RL strategy. All Tina models exhibit substantial reasoning aptitude, achieving average scores in the range of " + }, + { + "bbox": [ + 52, + 239, + 559, + 388 + ], + "type": "inline_equation", + "content": "48.16\\%" + }, + { + "bbox": [ + 52, + 239, + 559, + 388 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 52, + 239, + 559, + 388 + ], + "type": "inline_equation", + "content": "50.60\\%" + }, + { + "bbox": [ + 52, + 239, + 559, + 388 + ], + "type": "text", + "content": ". 
Significantly, nearly all Tina models notably outperform their corresponding baseline average scores, indicating marked improvements instilled by the parameter-efficient RL. The Tina-Open-RS2 model yielded the highest average performance observed at " + }, + { + "bbox": [ + 52, + 239, + 559, + 388 + ], + "type": "inline_equation", + "content": "50.60\\%" + }, + { + "bbox": [ + 52, + 239, + 559, + 388 + ], + "type": "text", + "content": ". Furthermore, these strong results were achieved with remarkably limited training durations, ranging from just " + }, + { + "bbox": [ + 52, + 239, + 559, + 388 + ], + "type": "inline_equation", + "content": "19\\%" + }, + { + "bbox": [ + 52, + 239, + 559, + 388 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 52, + 239, + 559, + 388 + ], + "type": "inline_equation", + "content": "57\\%" + }, + { + "bbox": [ + 52, + 239, + 559, + 388 + ], + "type": "text", + "content": " of a full training epoch, highlighting the efficiency and rapid adaptation enabled by the Tina approach. These findings strongly support our central hypothesis: robust reasoning capabilities can be effectively and economically cultivated in small language models through the targeted application of LoRA and RL." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 402, + 307, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 402, + 307, + 415 + ], + "spans": [ + { + "bbox": [ + 52, + 402, + 307, + 415 + ], + "type": "text", + "content": "4.3. 
Experiments Stage III: Tina Ablation Variants" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 424, + 559, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 424, + 559, + 519 + ], + "spans": [ + { + "bbox": [ + 52, + 424, + 559, + 519 + ], + "type": "text", + "content": "To better understand the factors influencing the performance and efficiency of our Tina models within the proposed low-cost framework, we conducted a series of ablation studies. These studies systematically investigate the impact of key design choices and hyperparameter: the underlying training dataset, the learning rate for LoRA updates, the rank of the LoRA adapters, and the specific RL algorithm employed. In each study, we typically varied one factor while holding others constant, often based on a high-performing configuration identified in our main experiments or preliminary runs. The results, summarized in Table 4, provide valuable insights into the robustness and sensitivity of our economical approach." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 526, + 559, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 526, + 559, + 608 + ], + "spans": [ + { + "bbox": [ + 52, + 526, + 559, + 608 + ], + "type": "text", + "content": "Impact of Training Dataset. The first section of Table 4 highlights the influence of the dataset used for RL. We compared seven distinct datasets, varying significantly in size (from " + }, + { + "bbox": [ + 52, + 526, + 559, + 608 + ], + "type": "inline_equation", + "content": "\\approx 1.4\\mathrm{k}" + }, + { + "bbox": [ + 52, + 526, + 559, + 608 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 52, + 526, + 559, + 608 + ], + "type": "inline_equation", + "content": "\\approx 94\\mathrm{k}" + }, + { + "bbox": [ + 52, + 526, + 559, + 608 + ], + "type": "text", + "content": " samples). 
Strikingly, the Tina-0pen-RS model, trained on a concise dataset of merely 7k examples, achieved the highest average score (50.60%). This outcome surpasses models trained on considerably larger datasets, such as Tina-0penR1 (93.7k samples, 49.26% avg). This observation strongly supports our core \"Tiny\" premise and reflects the intuition that the quality and diversity of the dataset matter more than the data size." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 614, + 559, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 614, + 559, + 681 + ], + "spans": [ + { + "bbox": [ + 52, + 614, + 559, + 681 + ], + "type": "text", + "content": "Sensitivity to Learning Rate. Using the Tina-LIMR configuration as a testbed (second section of Table 4), we assessed sensitivity to the learning rate. Among the tested values " + }, + { + "bbox": [ + 52, + 614, + 559, + 681 + ], + "type": "inline_equation", + "content": "(5 \\times 10^{-6}, 1 \\times 10^{-6}" + }, + { + "bbox": [ + 52, + 614, + 559, + 681 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 52, + 614, + 559, + 681 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-7}" + }, + { + "bbox": [ + 52, + 614, + 559, + 681 + ], + "type": "text", + "content": "), a learning rate of " + }, + { + "bbox": [ + 52, + 614, + 559, + 681 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-6}" + }, + { + "bbox": [ + 52, + 614, + 559, + 681 + ], + "type": "text", + "content": " yielded the optimal average performance " + }, + { + "bbox": [ + 52, + 614, + 559, + 681 + ], + "type": "inline_equation", + "content": "(48.47\\%)" + }, + { + "bbox": [ + 52, + 614, + 559, + 681 + ], + "type": "text", + "content": " for this setup. While performance differences were not drastic, this indicates that learning rate selection remains a factor, although effective results were obtained without extensive tuning." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 689, + 558, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 689, + 558, + 715 + ], + "spans": [ + { + "bbox": [ + 52, + 689, + 558, + 715 + ], + "type": "text", + "content": "Effect of LoRA Rank. The third ablation study investigated the impact of LoRA rank, which directly controls the number of trainable parameters. Testing ranks 4, 8, 16, 32, and 64 on the Tina-LIMR setup, we observed" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 237, + 54, + 373, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 54, + 373, + 64 + ], + "spans": [ + { + "bbox": [ + 237, + 54, + 373, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 552, + 751, + 558, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 552, + 751, + 558, + 757 + ], + "spans": [ + { + "bbox": [ + 552, + 751, + 558, + 757 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 52, + 83, + 558, + 375 + ], + "blocks": [ + { + "bbox": [ + 52, + 83, + 558, + 375 + ], + "lines": [ + { + "bbox": [ + 52, + 83, + 558, + 375 + ], + "spans": [ + { + "bbox": [ + 52, + 83, + 558, + 375 + ], + "type": "table", + "html": "
ABLATION ON DATASETSSTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
Tina-OpenR1 (93.7k)13%36.6726.6775.0086.8039.9030.5149.26
Tina-OpenThoughts (66.1k)30%36.6726.6772.5084.8041.4133.0949.19
Tina-DeepScaleR (40.3k)19%43.3326.6767.5086.2037.8828.6848.38
Tina-STILL-3 (33k)53%36.6730.0077.5084.6033.3326.8448.16
Tina-Open-S1 (18.6k)34%43.3320.0080.0084.0035.3528.6848.56
Tina-Open-RS (7k)51%43.3326.6777.5087.0036.3632.7250.60
Tina-LIMR (1.39k)58%46.6720.0075.0083.8034.8530.5148.47
ABLATION ON LEARNING RATESTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
Tina-LIMR-5e-6-lr29%36.6726.6775.0083.6035.8629.4147.87
Tina-LIMR-1e-6-lr58%46.6720.0075.0083.8034.8530.5148.47
Tina-LIMR-5e-7-lr58%43.3316.6777.5084.6034.8530.5147.91
ABLATION ON LORA RANKSTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
Tina-LIMR-64-LoRA-rank29%20.0030.0077.5084.2038.3831.6246.95
Tina-LIMR-32-LoRA-rank58%46.6720.0075.0083.8034.8530.5148.47
Tina-LIMR-16-LoRA-rank58%43.3333.3370.0083.2035.3528.3148.92
Tina-LIMR-8-LoRA-rank29%30.0026.6782.5083.8033.8430.5147.89
Tina-LIMR-4-LoRA-rank86%36.6720.0085.0083.8031.8229.0447.72
ABLATION ON RL ALGORITHMSTEPS (% OF 1 EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
Tina-Open-RS3-GRPO57%36.6723.3382.5085.2037.3731.6249.45
Tina-Open-RS3-DrGRPO17%43.3323.3380.0085.0035.3530.1549.53
", + "image_path": "7b908d3846cc3b7d51a45a7110eb4424a76ec798139377eb728451a8b9a8038e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 50, + 383, + 560, + 492 + ], + "lines": [ + { + "bbox": [ + 50, + 383, + 560, + 492 + ], + "spans": [ + { + "bbox": [ + 50, + 383, + 560, + 492 + ], + "type": "text", + "content": "Table 4: Tina ablation variants evaluation. Performance evaluation of Tina's ablation variants on six reasoning tasks. The value in the Steps column indicates the training steps of the best model checkpoint within one epoch, the full model checkpoint evaluation is shown in Appendix D. For the number in parentheses (the ablation on datasets), it means the data size of a dataset. During training, this number should be multiplied by the number of generation in GRPO-like algorithm (in our case, that multiplier is 4). For the model names, Tina-LIMR, Tina-LIMR-1e-6-1r and Tina-LIMR-32-LoRA-rank are the same model, we duplicate them for better visualization. The same idea applies to Tina-DeepScaleR and Tina-DeepScaleR-1.5B-Preview, Tina-STILL-3 and Tina-STILL-3-1.5B-preview, Tina-Open-S1 and Tina-Open-RS1, Tina-Open-RS and Tina-Open-RS2, Tina-Open-RS3-GRPO and Tina-Open-RS3." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 503, + 561, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 503, + 561, + 572 + ], + "spans": [ + { + "bbox": [ + 50, + 503, + 561, + 572 + ], + "type": "text", + "content": "considerable robustness. 
Ranks 8, 16, and 32 all produced strong results, with average scores clustering between " + }, + { + "bbox": [ + 50, + 503, + 561, + 572 + ], + "type": "inline_equation", + "content": "47.89\\%" + }, + { + "bbox": [ + 50, + 503, + 561, + 572 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 503, + 561, + 572 + ], + "type": "inline_equation", + "content": "48.92\\%" + }, + { + "bbox": [ + 50, + 503, + 561, + 572 + ], + "type": "text", + "content": ". Notably, rank 16 achieved the peak performance " + }, + { + "bbox": [ + 50, + 503, + 561, + 572 + ], + "type": "inline_equation", + "content": "(48.92\\%)" + }, + { + "bbox": [ + 50, + 503, + 561, + 572 + ], + "type": "text", + "content": " in this comparison, slightly outperforming rank 32 " + }, + { + "bbox": [ + 50, + 503, + 561, + 572 + ], + "type": "inline_equation", + "content": "(48.47\\%)" + }, + { + "bbox": [ + 50, + 503, + 561, + 572 + ], + "type": "text", + "content": ". Performance decreased slightly at the extremes (rank 4 and 64). This study validates that highly parameter-efficient configurations (low ranks like 16 or 32) are effective, further enhancing the cost-effectiveness and minimal overhead of the Tina approach." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 578, + 561, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 578, + 561, + 660 + ], + "spans": [ + { + "bbox": [ + 50, + 578, + 561, + 660 + ], + "type": "text", + "content": "Comparison of RL Algorithms. Finally, we compared two RL algorithms, GRPO and Dr.GRPO (Liu et al., 2025), using the Tina-Open-RS3 setup (final section of Table 4). Both algorithms led to similar peak average performance levels (49.45% for GRPO vs. 49.53% for Dr.GRPO). However, Dr.GRPO reached its best checkpoint significantly earlier in the training process (17% of an epoch vs. 57% for GRPO). 
This suggests potential advantages in sample efficiency for Dr.GRPO in this context with an alternative normalization in loss calculation, offering potentially faster convergence and further reductions in training time and cost." + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 373, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 373, + 64 + ], + "spans": [ + { + "bbox": [ + 236, + 54, + 373, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 552, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 552, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 552, + 750, + 558, + 759 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 83, + 487, + 99 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 83, + 487, + 99 + ], + "spans": [ + { + "bbox": [ + 53, + 83, + 487, + 99 + ], + "type": "text", + "content": "5. Hypothesis for Effective and Efficient LoRA: Rapid Format Adaptation" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 110, + 559, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 110, + 559, + 218 + ], + "spans": [ + { + "bbox": [ + 53, + 110, + 559, + 218 + ], + "type": "text", + "content": "Less is More LoRA-based RL. To understand why LoRA facilitates both effective and efficient reasoning improvements via RL, we analyze the relationship between training compute and performance, alongside training dynamics. As illustrated in Figure 3, plotting reasoning performance against approximate training FLOPs reveals a stark contrast between full-parameter and LoRA-based training regimes. 
First, our LoRA-based Tina models achieve reasoning scores comparable or superior to fully fine-tuned baselines while requiring (in some cases) orders of magnitude fewer training FLOPs. We observe that in LoRA models, increased training compute inversely affects performance, in contrast to full-parameter models. This observation highlights a \"less compute can yield more performance\" phenomenon." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 53, + 231, + 556, + 411 + ], + "blocks": [ + { + "bbox": [ + 53, + 231, + 556, + 411 + ], + "lines": [ + { + "bbox": [ + 53, + 231, + 556, + 411 + ], + "spans": [ + { + "bbox": [ + 53, + 231, + 556, + 411 + ], + "type": "image", + "image_path": "1a3b1780e0c1635d42f9fb927eb80189e6d0502e9d868b337a308db1217364ff.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 53, + 422, + 558, + 446 + ], + "lines": [ + { + "bbox": [ + 53, + 422, + 558, + 446 + ], + "spans": [ + { + "bbox": [ + 53, + 422, + 558, + 446 + ], + "type": "text", + "content": "Figure 3: Less is more LoRA-based RL. Approximate training FLOPs vs reasoning performance comparison between Tina and baseline models. The calculation is detailed in Appendix A." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 459, + 559, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 459, + 559, + 594 + ], + "spans": [ + { + "bbox": [ + 53, + 459, + 559, + 594 + ], + "type": "text", + "content": "This finding supports our hypothesis regarding how LoRA achieves such remarkable efficiency, which relates to the principle of \"learn structure/format, maintain knowledge.\" We posit that LoRA excels in this scenario because RL for reasoning heavily rewards the model's ability to generate outputs in a specific, verifiable format or structure (e.g., step-by-step reasoning chains). 
LoRA appears to be highly adept at learning these structural and stylistic patterns with minimal parameter changes, thus requiring very few FLOPs. At the same time, because LoRA modifies only a tiny fraction of the weights, it largely preserves the base model's vast pre-trained knowledge. Therefore, LoRA efficiently teaches the model how to format its existing knowledge into effective reasoning traces, rather than potentially imposing costly relearning of concepts or procedures that extensive full-parameter updates might entail. We hypothesize that this focus on structural adaptation allows Tina to achieve high reasoning performance with minimal computational investment." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 601, + 559, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 601, + 559, + 723 + ], + "spans": [ + { + "bbox": [ + 53, + 601, + 559, + 723 + ], + "type": "text", + "content": "Phase Transition in LoRA-based RL. Further insights into the LoRA-based RL mechanism arise from analyzing the training logs. That is, a distinct pattern emerges in Figure 4, which displays accuracy rewards, format rewards, and completion lengths over training steps for various Tina model runs. We consistently observe a training phase transition or turning point evident in the format-related metrics (format reward, row 2; completion length, row 3) across most Tina models. Around this transition point (indicated by the green vertical dashed line), the format reward often peaks or destabilizes, while the completion length frequently reaches a minimum before potentially reversing its trend. Notably, this relatively sharp transition observed in format and length metrics does not typically have a corresponding distinct turning point in the accuracy reward plots (row 1). 
The accuracy reward often exhibits more gradual fluctuations or slower drift over the" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 237, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 237, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 548, + 751, + 558, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 548, + 751, + 558, + 757 + ], + "spans": [ + { + "bbox": [ + 548, + 751, + 558, + 757 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 85, + 425, + 99 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 85, + 425, + 99 + ], + "spans": [ + { + "bbox": [ + 50, + 85, + 425, + 99 + ], + "type": "text", + "content": "training duration, without a clear inflection aligned with the format transition." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 76, + 110, + 301, + 240 + ], + "blocks": [ + { + "bbox": [ + 76, + 110, + 301, + 240 + ], + "lines": [ + { + "bbox": [ + 76, + 110, + 301, + 240 + ], + "spans": [ + { + "bbox": [ + 76, + 110, + 301, + 240 + ], + "type": "image", + "image_path": "d40c018fc96b24189e82fe7ab40c407172146c7f87651959576d69ef64fe1e94.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 307, + 109, + 534, + 240 + ], + "blocks": [ + { + "bbox": [ + 307, + 109, + 534, + 240 + ], + "lines": [ + { + "bbox": [ + 307, + 109, + 534, + 240 + ], + "spans": [ + { + "bbox": [ + 307, + 109, + 534, + 240 + ], + "type": "image", + "image_path": "59fd125d2f1a13bd4fccfe6713425a2c004cd392986e4c427eb467173796bb57.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 76, + 243, + 301, + 373 + ], + "blocks": [ + { + "bbox": [ + 76, + 243, + 301, + 373 + ], + "lines": [ + { + "bbox": [ + 76, + 243, + 301, + 373 + ], + "spans": [ + { + "bbox": [ + 76, + 243, + 301, + 373 + ], + "type": "image", + "image_path": "a927aeddf98e9e4ddcd83a2c9de8d7f68786d344059baf05973d53f2a46f7f94.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 307, + 243, + 534, + 373 + ], + "blocks": [ + { + "bbox": [ + 307, + 243, + 534, + 373 + ], + "lines": [ + { + "bbox": [ + 307, + 243, + 534, + 373 + ], + "spans": [ + { + "bbox": [ + 307, + 243, + 534, + 373 + ], + "type": "image", + "image_path": "a253375ed3cfaee7d217e3d0f9b8bbed5a508724cc1b432063d0e1b796a0d874.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 77, + 376, + 301, + 506 + ], + "blocks": [ + { + "bbox": [ + 77, + 376, + 301, + 506 + ], + "lines": [ + { + "bbox": [ + 77, + 
376, + 301, + 506 + ], + "spans": [ + { + "bbox": [ + 77, + 376, + 301, + 506 + ], + "type": "image", + "image_path": "a680baf93b2b90175aa50f3beb3cff201d9162fda811c1a9da2a5e11491e4d83.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 49, + 517, + 560, + 567 + ], + "lines": [ + { + "bbox": [ + 49, + 517, + 560, + 567 + ], + "spans": [ + { + "bbox": [ + 49, + 517, + 560, + 567 + ], + "type": "text", + "content": "Figure 4: Phase transition in LoRA-based RL. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1. The \"training turning point\" in the legend means the step where the format-like metrics (e.g., format reward, completion length) start to destabilize. Refer to Appendix E for the full set of plots." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 307, + 376, + 534, + 506 + ], + "blocks": [ + { + "bbox": [ + 307, + 376, + 534, + 506 + ], + "lines": [ + { + "bbox": [ + 307, + 376, + 534, + 506 + ], + "spans": [ + { + "bbox": [ + 307, + 376, + 534, + 506 + ], + "type": "image", + "image_path": "d641c625095868eb4819266432c0c7a072fc31c2f491c3d79d392f1711d5b00e.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 579, + 561, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 579, + 561, + 717 + ], + "spans": [ + { + "bbox": [ + 50, + 579, + 561, + 717 + ], + "type": "text", + "content": "Another crucial observation is the timing of optimal performance: the best-performing checkpoint, yielding the highest reasoning accuracy on held-out evaluations, consistently occurs just prior to or around this observed phase transition point in the format metrics (indicated by the red vertical dashed line). 
This decoupling between the dynamics of accuracy-based and format-based metrics suggests that the LoRA-based RL process rapidly optimizes the model's ability to adhere to the structural and stylistic elements rewarded by the format score and length constraints. The subsequent transition point may signify where this structural optimization saturates, becomes unstable, or perhaps begins to compromise generative quality in other ways (e.g., by overly constraining or expanding length). The fact that peak reasoning accuracy is achieved just before this format-driven transition implies that while learning the correct output format is essential and efficiently achieved via LoRA, pushing further on format-centric optimization alone does not necessarily" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 548, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 548, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 548, + 750, + 558, + 759 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 85, + 558, + 112 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 85, + 558, + 112 + ], + "spans": [ + { + "bbox": [ + 52, + 85, + 558, + 112 + ], + "type": "text", + "content": "yield better reasoning, and may even be detrimental. This reinforces our hypothesis that LoRA efficiently adapts the model by primarily learning the form required for effective reasoning." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 134, + 137, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 134, + 137, + 148 + ], + "spans": [ + { + "bbox": [ + 52, + 134, + 137, + 148 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 160, + 558, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 160, + 558, + 255 + ], + "spans": [ + { + "bbox": [ + 52, + 160, + 558, + 255 + ], + "type": "text", + "content": "We presented Tina to demonstrate that effective reasoning capabilities can be instilled in language models with efficiency and effectiveness. The principal contribution of Tina lies in democratizing access to RL-driven reasoning model development. By combining LoRA with RL on a 1.5B parameter base model, we achieved reasoning performance competitive with significantly larger models, accomplishing this within an estimated computational budget of only $9. This outcome prompts reflection on the factors enabling such minimalist approaches, and on their possible future trajectories. Despite encouraging results, this work is subject to certain limitations:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 262, + 558, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 262, + 558, + 301 + ], + "spans": [ + { + "bbox": [ + 52, + 262, + 558, + 301 + ], + "type": "text", + "content": "Base Model Scale: Our experiments centered on a 1.5B parameter model. While showcasing cost-performance efficiency, the absolute reasoning ceiling achievable with this \"tiny\" model may naturally be lower for complex, multi-step reasoning problems than what larger models can offer." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 309, + 558, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 309, + 558, + 350 + ], + "spans": [ + { + "bbox": [ + 52, + 309, + 558, + 350 + ], + "type": "text", + "content": "Reasoning Task Scope: Our evaluation focused primarily on mathematical and formal logic reasoning benchmarks (AIME, AMC, MATH, GPQA, Minerva). The effectiveness and transferability of the learned reasoning skills to other domains, such as coding, warrants further investigation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 357, + 558, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 357, + 558, + 411 + ], + "spans": [ + { + "bbox": [ + 52, + 357, + 558, + 411 + ], + "type": "text", + "content": "Hyperparameter Optimization: We intentionally minimized hyperparameter tuning costs by adopting established configurations. While this demonstrates a certain form of robustness to our methodology, there may be potential for further performance gains derived from additional tuning, perhaps tailored to the interplay between LoRA, the RL algorithm, and the target reasoning tasks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 433, + 175, + 447 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 433, + 175, + 447 + ], + "spans": [ + { + "bbox": [ + 52, + 433, + 175, + 447 + ], + "type": "text", + "content": "7. Acknowledgment" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 459, + 558, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 459, + 558, + 580 + ], + "spans": [ + { + "bbox": [ + 52, + 459, + 558, + 580 + ], + "type": "text", + "content": "We want to express our gratitude to the broader open-source community. 
This research was made possible by leveraging numerous publicly available resources, including training and evaluation framework, open datasets, accessible pre-trained language models, and the insights shared through technical reports. The computational resources required for the experiments described herein were provided by the Center for Advanced Research Computing (CARC) at the University of Southern California (USC). We are grateful for the support which enabled the training and evaluation of our models. J.A. was supported by the National Science Foundation Graduate Research Fellowship Program under Grant No. DGE-1842487. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the authors and do not necessarily reflect the views of the National Science Foundation." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 237, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 237, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 548, + 750, + 558, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 548, + 750, + 558, + 757 + ], + "spans": [ + { + "bbox": [ + 548, + 750, + 558, + 757 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 84, + 119, + 97 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 84, + 119, + 97 + ], + "spans": [ + { + "bbox": [ + 52, + 84, + 119, + 97 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 110, + 560, + 719 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 51, + 110, + 560, + 138 + ], + "type": 
"ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 110, + 560, + 138 + ], + "spans": [ + { + "bbox": [ + 51, + 110, + 560, + 138 + ], + "type": "text", + "content": "Pranjal Aggarwal and Sean Welleck. L1: Controlling how long a reasoning model thinks with reinforcement learning, 2025. URL https://arxiv.org/abs/2503.04697." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 146, + 559, + 174 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 146, + 559, + 174 + ], + "spans": [ + { + "bbox": [ + 52, + 146, + 559, + 174 + ], + "type": "text", + "content": "Zeyuan Allen-Zhu and Yuanzhi Li. Physics of language models: Part 3.3, knowledge capacity scaling laws. In Proceedings of International Conference on Learning Representations (ICLR), 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 182, + 559, + 210 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 182, + 559, + 210 + ], + "spans": [ + { + "bbox": [ + 52, + 182, + 559, + 210 + ], + "type": "text", + "content": "Art of Problem Solving. Amc problems and solutions, 2023. URL https://artofproblemsolving.com/wiki/index.php/AMC_12_Problems_and_Solutions." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 218, + 559, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 218, + 559, + 246 + ], + "spans": [ + { + "bbox": [ + 52, + 218, + 559, + 246 + ], + "type": "text", + "content": "Art of Problem Solving. Aime problems and solutions, February 2024. URL https://artofproblemsolving.com/wiki/index.php/AIME_Problems_and_Solutions." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 255, + 559, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 255, + 559, + 296 + ], + "spans": [ + { + "bbox": [ + 52, + 255, + 559, + 296 + ], + "type": "text", + "content": "Yinlam Chow, Guy Tennenholtz, Izzeddin Gur, Vincent Zhuang, Bo Dai, Sridhar Thiagarajan, Craig Boutilier, Rishabh Agarwal, Aviral Kumar, and Aleksandra Faust. Inference-aware fine-tuning for Best-of-N sampling in large language models, 2024. URL https://arxiv.org/abs/2412.15287." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 304, + 559, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 304, + 559, + 331 + ], + "spans": [ + { + "bbox": [ + 52, + 304, + 559, + 331 + ], + "type": "text", + "content": "Cudo Compute. Nvidia L40S pricing. URL https://www.cudocompute.com/products/gpu-cloud/nvidia-l40s. Accessed: 2025-04-21." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 340, + 558, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 340, + 558, + 395 + ], + "spans": [ + { + "bbox": [ + 52, + 340, + 558, + 395 + ], + "type": "text", + "content": "Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, Jiarui Yuan, Huayu Chen, Kaiyan Zhang, Xingtai Lv, Shuo Wang, Yuan Yao, Xu Han, Hao Peng, Yu Cheng, Zhiyuan Liu, Maosong Sun, Bowen Zhou, and Ning Ding. Process reinforcement through implicit rewards, 2025. URL https://arxiv.org/abs/2502.01456." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 403, + 558, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 403, + 558, + 430 + ], + "spans": [ + { + "bbox": [ + 52, + 403, + 558, + 430 + ], + "type": "text", + "content": "Quy-Anh Dang and Chris Ngo. Reinforcement learning for reasoning in small llms: What works and what doesn't, 2025. URL https://arxiv.org/abs/2503.16219." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 440, + 559, + 467 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 440, + 559, + 467 + ], + "spans": [ + { + "bbox": [ + 52, + 440, + 559, + 467 + ], + "type": "text", + "content": "DeepSeek-AI. DeepSeek-R1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 475, + 559, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 475, + 559, + 517 + ], + "spans": [ + { + "bbox": [ + 52, + 475, + 559, + 517 + ], + "type": "text", + "content": "Clémentine Fourrier, Nathan Habib, Hynek Kydlíček, Thomas Wolf, and Lewis Tunstall. Lighteval: A lightweight framework for llm evaluation, 2023. URL https://github.com/huggingface/lighteval." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 525, + 559, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 525, + 559, + 592 + ], + "spans": [ + { + "bbox": [ + 52, + 525, + 559, + 592 + ], + "type": "text", + "content": "Bofei Gao, Feifan Song, Zhe Yang, Zefan Cai, Yibo Miao, Qingxiu Dong, Lei Li, Chenghao Ma, Liang Chen, Runxin Xu, Zhengyang Tang, Benyou Wang, Daoguang Zan, Shanghaoran Quan, Ge Zhang, Lei Sha, Yichang Zhang, Xuancheng Ren, Tianyu Liu, and Baobao Chang. Omni-MATH: A universal olympiad level mathematic benchmark for large language models, 2024a. URL https://arxiv.org/abs/2410.07985." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 602, + 559, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 602, + 559, + 670 + ], + "spans": [ + { + "bbox": [ + 52, + 602, + 559, + 670 + ], + "type": "text", + "content": "Leo Gao, Jonathan Tow, Baber Abbasi, Stella Biderman, Sid Black, Anthony DiPofi, Charles Foster, Laurence Golding, Jeffrey Hsu, Alain Le Noac'h, Haonan Li, Kyle McDonell, Niklas Muennighoff, Chris Ociepa, Jason Phang, Laria Reynolds, Hailey Schoelkopf, Aviya Skowron, Lintang Sutawika, Eric Tang, Anish Thite, Ben Wang, Kevin Wang, and Andy Zou. A framework for few-shot language model evaluation, 07 2024b. URL https://zenodo.org/records/12608602." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 52, + 678, + 558, + 719 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 678, + 558, + 719 + ], + "spans": [ + { + "bbox": [ + 52, + 678, + 558, + 719 + ], + "type": "text", + "content": "Sylvain Gugger, Lysandre Debut, Thomas Wolf, Philipp Schmid, Zachary Mueller, Sourab Mangrulkar, Marc Sun, and Benjamin Bossan. Accelerate: Training and inference at scale made simple, efficient and adaptable., 2022. URL https://github.com/huggingface/accelerate." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 235, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 235, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 548, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 548, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 548, + 750, + 558, + 759 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 85, + 560, + 724 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 52, + 85, + 559, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 85, + 559, + 112 + ], + "spans": [ + { + "bbox": [ + 52, + 85, + 559, + 112 + ], + "type": "text", + "content": "Zeyu Han, Chao Gao, Jinyang Liu, Jeff Zhang, and Sai Qian Zhang. Parameter-efficient fine-tuning for large models: A comprehensive survey, 2024. URL https://arxiv.org/abs/2403.14608." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 120, + 560, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 120, + 560, + 161 + ], + "spans": [ + { + "bbox": [ + 52, + 120, + 560, + 161 + ], + "type": "text", + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset, 2021. URL https://arxiv.org/abs/2103.03874." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 169, + 560, + 208 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 169, + 560, + 208 + ], + "spans": [ + { + "bbox": [ + 52, + 169, + 560, + 208 + ], + "type": "text", + "content": "Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuzhhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models, 2021. URL https://arxiv.org/abs/2106.09685." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 217, + 560, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 217, + 560, + 258 + ], + "spans": [ + { + "bbox": [ + 52, + 217, + 560, + 258 + ], + "type": "text", + "content": "Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, and Heung-Yeung Shum Xiangyu Zhang. Open-Reasoner-Zero: An open source approach to scaling reinforcement learning on the base model, 2025. URL https://github.com/Open-Reasoner-Zero/Open-Reasoner-Zero." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 266, + 560, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 266, + 560, + 308 + ], + "spans": [ + { + "bbox": [ + 52, + 266, + 560, + 308 + ], + "type": "text", + "content": "Zhen Huang, Haoyang Zou, Xuefeng Li, Yixiu Liu, Yuxiang Zheng, Ethan Chern, Shijie Xia, Yiwei Qin, Weizhe Yuan, and Pengfei Liu. O1 replication journey - part 2: Surpassing o1-preview through simple distillation, big progress or bitter lesson?, 2024. URL https://arxiv.org/abs/2411.16489." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 315, + 560, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 315, + 560, + 342 + ], + "spans": [ + { + "bbox": [ + 52, + 315, + 560, + 342 + ], + "type": "text", + "content": "Hugging Face. Open r1: A fully open reproduction of deepseek-r1, January 2025. URL https://github.com/huggingface/open-r1." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 350, + 560, + 390 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 350, + 560, + 390 + ], + "spans": [ + { + "bbox": [ + 52, + 350, + 560, + 390 + ], + "type": "text", + "content": "Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of Symposium on Operating Systems Principles (SOSP), 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 398, + 560, + 467 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 398, + 560, + 467 + ], + "spans": [ + { + "bbox": [ + 52, + 398, + 560, + 467 + ], + "type": "text", + "content": "Nathan Lambert, Jacob Morrison, Valentina Pyatkin, Shengyi Huang, Hamish Ivison, Faeze Brahman, Lester James V. Miranda, Alisa Liu, Nouha Dziri, Shane Lyu, Yuling Gu, Saumya Malik, Victoria Graf, Jena D. Hwang, Jiangjiang Yang, Ronan Le Bras, Oyvind Tafjord, Chris Wilhelm, Luca Soldaini, Noah A. Smith, Yizhong Wang, Pradeep Dasigi, and Hannaneh Hajishirzi. Tulu 3: Pushing frontiers in open language model post-training, 2025. URL https://arxiv.org/abs/2411.15124." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 475, + 560, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 475, + 560, + 529 + ], + "spans": [ + { + "bbox": [ + 52, + 475, + 560, + 529 + ], + "type": "text", + "content": "Aitor Lewkowycz, Anders Andreassen, David Dohan, Ethan Dyer, Henryk Michalewski, Vinay Ramasesh, Ambrose Slone, Cem Anil, Imanol Schlag, Theo Gutman-Solo, Yuhuai Wu, Behnam Neyshabur, Guy Gur-Ari, and Vedant Misra. Solving quantitative reasoning problems with language models. In Proceedings of Advances in Neural Information Processing Systems (NeurIPS), volume 35, pages 3843-3857, 2022." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 536, + 560, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 536, + 560, + 578 + ], + "spans": [ + { + "bbox": [ + 52, + 536, + 560, + 578 + ], + "type": "text", + "content": "Jia LI, Edward Beeching, Lewis Tunstall, Ben Lipkin, Roman Soletskyi, Shengyi Costa Huang, Kashif Rasul, Longhui Yu, Albert Jiang, Ziju Shen, Zihan Qin, Bin Dong, Li Zhou, Yann Fleureau, Guillaume Lample, and Stanislas Polu. NuminaMath, 2024. URL https://huggingface.co/AI-MO/NuminaMath-CoT." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 585, + 560, + 627 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 585, + 560, + 627 + ], + "spans": [ + { + "bbox": [ + 52, + 585, + 560, + 627 + ], + "type": "text", + "content": "Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. In Proceedings of International Conference on Learning Representations (ICLR), 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 634, + 560, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 634, + 560, + 673 + ], + "spans": [ + { + "bbox": [ + 52, + 634, + 560, + 673 + ], + "type": "text", + "content": "Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Understanding r1-zero-like training: A critical perspective, 2025. URL https://arxiv.org/abs/2503.20783." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 682, + 560, + 724 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 682, + 560, + 724 + ], + "spans": [ + { + "bbox": [ + 52, + 682, + 560, + 724 + ], + "type": "text", + "content": "Michael Luo, Sijun Tan, Justin Wong, Xiaoxiang Shi, William Y. 
Tang, Manan Roongta, Colin Cai, Jeffrey Luo, Tianjun Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. DeepScaleR: Surpassing o1-preview with a 1.5b model by scaling rl, 2025. URL https://agentica-project.com/." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 235, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 235, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 548, + 751, + 558, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 548, + 751, + 558, + 758 + ], + "spans": [ + { + "bbox": [ + 548, + 751, + 558, + 758 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 85, + 561, + 690 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 52, + 85, + 558, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 85, + 558, + 112 + ], + "spans": [ + { + "bbox": [ + 52, + 85, + 558, + 112 + ], + "type": "text", + "content": "Trung Quoc Luong, Xinbo Zhang, Zhanming Jie, Peng Sun, Xiaoran Jin, and Hang Li. ReFT: Reasoning with reinforced fine-tuning, 2024. URL https://arxiv.org/abs/2401.08967." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 121, + 561, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 121, + 561, + 176 + ], + "spans": [ + { + "bbox": [ + 52, + 121, + 561, + 176 + ], + "type": "text", + "content": "Chengqi Lyu, Songyang Gao, Yuzhe Gu, Wenwei Zhang, Jianfei Gao, Kuikun Liu, Ziyi Wang, Shuaibin Li, Qian Zhao, Haian Huang, Weihan Cao, Jiangning Liu, Hongwei Liu, Junnan Liu, Songyang Zhang, Dahua Lin, and Kai Chen. 
Exploring the limit of outcome reward for learning mathematical reasoning, 2025. URL https://arxiv.org/abs/2502.06781." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 184, + 560, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 184, + 560, + 225 + ], + "spans": [ + { + "bbox": [ + 52, + 184, + 560, + 225 + ], + "type": "text", + "content": "Sourab Mangrulkar, Sylvain Gugger, Lysandre Debut, Younes Belkada, Sayak Paul, and Benjamin Bossan. PEFT: State-of-the-art parameter-efficient fine-tuning methods, 2022. URL https://github.com/huggingface/peft." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 233, + 560, + 288 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 233, + 560, + 288 + ], + "spans": [ + { + "bbox": [ + 52, + 233, + 560, + 288 + ], + "type": "text", + "content": "Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, Wayne Xin Zhao, Zheng Liu, Zhongyuan Wang, and Ji-Rong Wen. Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems, 2024. URL https://arxiv.org/abs/2412.09413." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 297, + 560, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 297, + 560, + 338 + ], + "spans": [ + { + "bbox": [ + 52, + 297, + 560, + 338 + ], + "type": "text", + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 346, + 560, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 346, + 560, + 374 + ], + "spans": [ + { + "bbox": [ + 52, + 346, + 560, + 374 + ], + "type": "text", + "content": "NovaSky Team. 
Sky-T1: Train your own o1 preview model within $450, 2025. URL https://novasky-ai.github.io/posts/sky-t1." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 52, + 383, + 473, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 383, + 473, + 397 + ], + "spans": [ + { + "bbox": [ + 52, + 383, + 473, + 397 + ], + "type": "text", + "content": "OpenAI. OpenAI o1 system card, 2024. URL https://arxiv.org/abs/2412.16720." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 405, + 490, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 405, + 490, + 419 + ], + "spans": [ + { + "bbox": [ + 52, + 405, + 490, + 419 + ], + "type": "text", + "content": "OpenThoughts Team. Open Thoughts, January 2025. URL https://open-thoughts.ai." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 427, + 558, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 427, + 558, + 468 + ], + "spans": [ + { + "bbox": [ + 52, + 427, + 558, + 468 + ], + "type": "text", + "content": "Debjit Paul, Mete Ismayilzada, Maxime Peyrard, Beatrix Borges, Antoine Bosselut, Robert West, and Boi Faltings. REFINER: Reasoning feedback on intermediate representations. In Proceedings of European Chapter of the ACL (EACL), pages 1100-1126, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 477, + 558, + 505 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 477, + 558, + 505 + ], + "spans": [ + { + "bbox": [ + 52, + 477, + 558, + 505 + ], + "type": "text", + "content": "Zhenting Qi, Mingyuan Ma, Jiahang Xu, Li Lyna Zhang, Fan Yang, and Mao Yang. Mutual reasoning makes smaller LLMs stronger problem-solvers, 2024. URL https://arxiv.org/abs/2408.06195." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 52, + 514, + 560, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 514, + 560, + 553 + ], + "spans": [ + { + "bbox": [ + 52, + 514, + 560, + 553 + ], + "type": "text", + "content": "Samyam Rajbhandari, Jeff Rasley, Olatunj Ruwase, and Yuxiong He. Zero: Memory optimization towards training A trillion parameter models. CoRR, abs/1910.02054, 2019. URL http://arxiv.org/abs/1910.02054." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 52, + 563, + 560, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 563, + 560, + 604 + ], + "spans": [ + { + "bbox": [ + 52, + 563, + 560, + 604 + ], + "type": "text", + "content": "David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R Bowman. GPQA: A graduate-level google-proof Q&A benchmark. In Proceedings of Conference on Language Modeling (COLM), 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 52, + 612, + 560, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 612, + 560, + 641 + ], + "spans": [ + { + "bbox": [ + 52, + 612, + 560, + 641 + ], + "type": "text", + "content": "RUCAIBox STILL Team. STILL-3-1.5B-preview: Enhancing slow thinking abilities of small models through reinforcement learning. 2025. URL https://github.com/RUCAIBox/Slow_Thinking_with_LLMs." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 52, + 648, + 560, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 648, + 560, + 690 + ], + "spans": [ + { + "bbox": [ + 52, + 648, + 560, + 690 + ], + "type": "text", + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, Y. K. Li, Y. Wu, and Daya Guo. DeepSeekMath: Pushing the limits of mathematical reasoning in open language models, 2024. URL https://arxiv.org/abs/2402.03300." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 235, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 235, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 548, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 548, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 548, + 750, + 558, + 759 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 85, + 561, + 388 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 51, + 85, + 559, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 85, + 559, + 140 + ], + "spans": [ + { + "bbox": [ + 51, + 85, + 559, + 140 + ], + "type": "text", + "content": "Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. Hybridflow: A flexible and efficient rlhf framework. In Proceedings of European Conference on Computer Systems (EuroSys), EuroSys '25, page 1279-1297. ACM, March 2025. doi: 10.1145/3689031.3696075. URL http://dx.doi.org/10.1145/3689031.3696075." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 148, + 561, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 148, + 561, + 190 + ], + "spans": [ + { + "bbox": [ + 52, + 148, + 561, + 190 + ], + "type": "text", + "content": "Leandro von Werra, Younes Belkada, Lewis Tunstall, Edward Beeching, Tristan Thrush, Nathan Lambert, Shengyi Huang, Kashif Rasul, and Quentin Gallouédec. Trl: Transformer reinforcement learning, 2020. URL https://github.com/huggingface/trl." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 198, + 560, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 198, + 560, + 225 + ], + "spans": [ + { + "bbox": [ + 52, + 198, + 560, + 225 + ], + "type": "text", + "content": "Shangshang Wang and Willie Neiswanger. LLM reasoning: Curated insights, 2025. URL https://shangshangwang.notion.site/llm-reasoning." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 233, + 559, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 233, + 559, + 275 + ], + "spans": [ + { + "bbox": [ + 52, + 233, + 559, + 275 + ], + "type": "text", + "content": "Zhenyu Wu, Qingkai Zeng, Zhihan Zhang, Zhaoxuan Tan, Chao Shen, and Meng Jiang. Large language models can self-correct with key condition verification. In Proceedings of Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 12846-12867, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 283, + 560, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 283, + 560, + 339 + ], + "spans": [ + { + "bbox": [ + 52, + 283, + 560, + 339 + ], + "type": "text", + "content": "Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, Chenyang Shao, Yuwei Yan, Qinglong Yang, Yiwen Song, Sijian Ren, Xinyuan Hu, Yu Li, Jie Feng, Chen Gao, and Yong Li. Towards large reasoning models: A survey of reinforced reasoning with large language models, 2025. URL https://arxiv.org/abs/2501.09686." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 346, + 560, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 346, + 560, + 388 + ], + "spans": [ + { + "bbox": [ + 52, + 346, + 560, + 388 + ], + "type": "text", + "content": "Weihao Zeng, Yuzhen Huang, Qian Liu, Wei Liu, Keqing He, Zejun Ma, and Junxian He. 
SimpleRL-Zoo: Investigating and taming zero reinforcement learning for open base models in the wild, 2025. URL https://arxiv.org/abs/2503.18892." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 235, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 235, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 548, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 548, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 548, + 750, + 558, + 759 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 271, + 83, + 332, + 100 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 271, + 83, + 332, + 100 + ], + "spans": [ + { + "bbox": [ + 271, + 83, + 332, + 100 + ], + "type": "text", + "content": "Appendix" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 118, + 170, + 133 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 118, + 170, + 133 + ], + "spans": [ + { + "bbox": [ + 52, + 118, + 170, + 133 + ], + "type": "text", + "content": "A. Cost Breakdown" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 144, + 561, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 144, + 561, + 185 + ], + "spans": [ + { + "bbox": [ + 52, + 144, + 561, + 185 + ], + "type": "text", + "content": "This section provides further details on how training data amounts, computational cost, time cost, and performance metrics reported in this paper – particularly those presented in figures like Figures 1 and 3 – were determined and should be interpreted." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 192, + 559, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 192, + 559, + 259 + ], + "spans": [ + { + "bbox": [ + 52, + 192, + 559, + 259 + ], + "type": "text", + "content": "Overall Comparison (Figure 1). For the baseline models included in Figure 1, the approximate training data amounts, computational costs (typically reported as GPU hours or total FLOPs), and training times are sourced from their respective technical reports or publications, leveraging the helpful summary provided in the Open-RS paper (Dang and Ngo, 2025). Reasoning performance scores for all models, encompassing both baselines and our Tina models, stem from results presented in Tables 2 and 3." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 266, + 334, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 266, + 334, + 280 + ], + "spans": [ + { + "bbox": [ + 52, + 266, + 334, + 280 + ], + "type": "text", + "content": "Also, it is crucial to understand the scope of reported costs:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 59, + 287, + 558, + 358 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 59, + 287, + 557, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 287, + 557, + 326 + ], + "spans": [ + { + "bbox": [ + 59, + 287, + 557, + 326 + ], + "type": "text", + "content": "- Epoch vs. Best Checkpoint: Costs cited for Tina and baseline models reflect the resources needed to complete a full training epoch or a predefined training run, not necessarily the minimal cost to reach the single best-performing checkpoint within that run." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 59, + 333, + 558, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 333, + 558, + 358 + ], + "spans": [ + { + "bbox": [ + 59, + 333, + 558, + 358 + ], + "type": "text", + "content": "- Training vs. Evaluation: Reported costs cover training only, omitting the computational expense required for model evaluation across benchmarks since such information is missing from several baseline models." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 52, + 367, + 559, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 367, + 559, + 434 + ], + "spans": [ + { + "bbox": [ + 52, + 367, + 559, + 434 + ], + "type": "text", + "content": "Particularly, the " + }, + { + "bbox": [ + 52, + 367, + 559, + 434 + ], + "type": "inline_equation", + "content": "9 USD in the abstract represents the estimated cost to train the Tina model up to its best-performing checkpoint and subsequently evaluate that specific checkpoint. For context comparing potential full training runs, the cost to train a Tina model for a complete epoch is" + }, + { + "bbox": [ + 52, + 367, + 559, + 434 + ], + "type": "text", + "content": "14 USD (training only). Including evaluation costs for such a full run would increase the total to approximately $31 USD. We emphasize the $9 as representing the efficient path to the best Tina model." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 441, + 559, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 441, + 559, + 496 + ], + "spans": [ + { + "bbox": [ + 52, + 441, + 559, + 496 + ], + "type": "text", + "content": "FLOPs Estimation (Figure 3). The approximate training FLOPs shown in Figure 3 serve as a hardware-agnostic measure of computational work. 
For both Tina and baseline models, these values were estimated based on reported training durations and hardware configurations sourced from technical reports or the Open-RS summary, using standard FLOPs calculation methodologies." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 548, + 750, + 558, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 548, + 750, + 558, + 757 + ], + "spans": [ + { + "bbox": [ + 548, + 750, + 558, + 757 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 83, + 275, + 99 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 83, + 275, + 99 + ], + "spans": [ + { + "bbox": [ + 51, + 83, + 275, + 99 + ], + "type": "text", + "content": "B. Background behind Tina Training" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 111, + 174, + 125 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 111, + 174, + 125 + ], + "spans": [ + { + "bbox": [ + 51, + 111, + 174, + 125 + ], + "type": "text", + "content": "B.1. 
GRPO Formulation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 133, + 559, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 133, + 559, + 163 + ], + "spans": [ + { + "bbox": [ + 50, + 133, + 559, + 163 + ], + "type": "text", + "content": "Recall the following formulation of GRPO: For each question " + }, + { + "bbox": [ + 50, + 133, + 559, + 163 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 50, + 133, + 559, + 163 + ], + "type": "text", + "content": ", GRPO samples a group " + }, + { + "bbox": [ + 50, + 133, + 559, + 163 + ], + "type": "inline_equation", + "content": "G = \\{o_1, o_2, \\ldots, o_G\\}" + }, + { + "bbox": [ + 50, + 133, + 559, + 163 + ], + "type": "text", + "content": " of outputs from the old policy " + }, + { + "bbox": [ + 50, + 133, + 559, + 163 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta_{\\mathrm{old}}}" + }, + { + "bbox": [ + 50, + 133, + 559, + 163 + ], + "type": "text", + "content": " and optimizes the policy " + }, + { + "bbox": [ + 50, + 133, + 559, + 163 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 50, + 133, + 559, + 163 + ], + "type": "text", + "content": " by maximizing the following objective:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 171, + 542, + 219 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 171, + 542, + 219 + ], + "spans": [ + { + "bbox": [ + 67, + 171, + 542, + 219 + ], + "type": "interline_equation", + "content": "\\underset { \\begin{array}{c} q \\sim P (Q), \\\\ \\{o _ {i} \\} _ {i = 1} ^ {G} \\sim \\pi_ {\\theta_ {\\mathrm {o l d}}} (O | q) \\end{array} } {\\mathbb {E}} \\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\left(\\min \\left(\\frac {\\pi_ {\\theta} (o _ {i} | q)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} (o _ {i} | q)} A _ {i}, \\operatorname {c l i p p e d} \\left(\\frac {\\pi_ {\\theta} (o _ {i} | q)}{\\pi_ {\\theta_ {\\mathrm {o 
l d}}} (o _ {i} | q)}, 1 - \\epsilon , 1 + \\epsilon\\right) A _ {i}\\right) - \\beta \\mathbb {D} _ {\\mathrm {K L}} (\\pi_ {\\theta} | | \\pi_ {\\mathrm {r e f}})\\right) \\right].", + "image_path": "2d53ec46e288acc29b44cc0d655077b12bf43b901a29c7b20ed7cb6a8828df61.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 228, + 441, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 228, + 441, + 242 + ], + "spans": [ + { + "bbox": [ + 50, + 228, + 441, + 242 + ], + "type": "text", + "content": "Here " + }, + { + "bbox": [ + 50, + 228, + 441, + 242 + ], + "type": "inline_equation", + "content": "A_{i}" + }, + { + "bbox": [ + 50, + 228, + 441, + 242 + ], + "type": "text", + "content": " denotes the advantage computed from a group of rewards " + }, + { + "bbox": [ + 50, + 228, + 441, + 242 + ], + "type": "inline_equation", + "content": "\\{r_1,r_2,\\dots ,r_G\\}" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 227, + 250, + 383, + 279 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 250, + 383, + 279 + ], + "spans": [ + { + "bbox": [ + 227, + 250, + 383, + 279 + ], + "type": "interline_equation", + "content": "A _ {i} = \\frac {r _ {i} - \\mathrm {m e a n} (\\{r _ {1} , r _ {2} , \\ldots , r _ {G} \\})}{\\mathrm {s t d} (\\{r _ {1} , r _ {2} , \\ldots , r _ {G} \\})},", + "image_path": "57521b0bd22d4166810982ee97c9c205d00f9dd5807c6052a1f6dc3adb4e9981.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 288, + 73, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 288, + 73, + 300 + ], + "spans": [ + { + "bbox": [ + 50, + 288, + 73, + 300 + ], + "type": "text", + "content": "and" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 192, + 297, + 419, + 327 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 297, + 419, + 327 + ], + "spans": [ + { + "bbox": [ + 192, + 297, + 419, + 327 + ], + "type": 
"interline_equation", + "content": "\\mathbb {D} _ {\\mathrm {K L}} (\\pi_ {\\theta} | | \\pi_ {\\mathrm {r e f}}) = \\frac {\\pi_ {\\mathrm {r e f}} (o _ {i} | q)}{\\pi_ {\\theta} (o _ {i} | q)} - \\log \\frac {\\pi_ {\\mathrm {r e f}} (o _ {i} | q)}{\\pi_ {\\theta} (o _ {i} | q)} - 1.", + "image_path": "e163250b6fb559cde051ed6f39a9dabe05f4228399aa036ce5a4958d8e4ae7d9.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 331, + 492, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 331, + 492, + 345 + ], + "spans": [ + { + "bbox": [ + 50, + 331, + 492, + 345 + ], + "type": "text", + "content": "Note that " + }, + { + "bbox": [ + 50, + 331, + 492, + 345 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 50, + 331, + 492, + 345 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 331, + 492, + 345 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 50, + 331, + 492, + 345 + ], + "type": "text", + "content": " are parameters controlling the clipping range and KL penalty, respectively." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 365, + 171, + 377 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 365, + 171, + 377 + ], + "spans": [ + { + "bbox": [ + 51, + 365, + 171, + 377 + ], + "type": "text", + "content": "B.2. LoRA Formulation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 388, + 559, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 388, + 559, + 433 + ], + "spans": [ + { + "bbox": [ + 50, + 388, + 559, + 433 + ], + "type": "text", + "content": "We follow the standard LoRA setup (Hu et al., 2021). 
Given a frozen pretrained weight matrix " + }, + { + "bbox": [ + 50, + 388, + 559, + 433 + ], + "type": "inline_equation", + "content": "W_0 \\in \\mathbb{R}^{d \\times k}" + }, + { + "bbox": [ + 50, + 388, + 559, + 433 + ], + "type": "text", + "content": " and trainable low-rank matrices " + }, + { + "bbox": [ + 50, + 388, + 559, + 433 + ], + "type": "inline_equation", + "content": "A \\in \\mathbb{R}^{d \\times r}" + }, + { + "bbox": [ + 50, + 388, + 559, + 433 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 388, + 559, + 433 + ], + "type": "inline_equation", + "content": "B \\in \\mathbb{R}^{r \\times k}" + }, + { + "bbox": [ + 50, + 388, + 559, + 433 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 50, + 388, + 559, + 433 + ], + "type": "inline_equation", + "content": "r \\ll \\min(d, k)" + }, + { + "bbox": [ + 50, + 388, + 559, + 433 + ], + "type": "text", + "content": ", the original forward pass " + }, + { + "bbox": [ + 50, + 388, + 559, + 433 + ], + "type": "inline_equation", + "content": "h(x) = W_0 x" + }, + { + "bbox": [ + 50, + 388, + 559, + 433 + ], + "type": "text", + "content": " is modified as" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 257, + 441, + 352, + 457 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 441, + 352, + 457 + ], + "spans": [ + { + "bbox": [ + 257, + 441, + 352, + 457 + ], + "type": "interline_equation", + "content": "\\hat {h} (x) = W _ {0} x + A B x.", + "image_path": "28a2994065a07ca1b55ef8c44cddfb26dc2e52400324d7f5375371de6576060e.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 50, + 467, + 515, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 467, + 515, + 482 + ], + "spans": [ + { + "bbox": [ + 50, + 467, + 515, + 482 + ], + "type": "text", + "content": "We use the default LoRA implementation provided in the PEFT (Mangrulkar et al., 2022) library." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 235, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 235, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 548, + 750, + 558, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 548, + 750, + 558, + 758 + ], + "spans": [ + { + "bbox": [ + 548, + 750, + 558, + 758 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 83, + 264, + 99 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 83, + 264, + 99 + ], + "spans": [ + { + "bbox": [ + 51, + 83, + 264, + 99 + ], + "type": "text", + "content": "C. Additional Experimental Details" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 111, + 168, + 125 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 111, + 168, + 125 + ], + "spans": [ + { + "bbox": [ + 51, + 111, + 168, + 125 + ], + "type": "text", + "content": "C.1. Hyperparameters" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 133, + 512, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 133, + 512, + 148 + ], + "spans": [ + { + "bbox": [ + 50, + 133, + 512, + 148 + ], + "type": "text", + "content": "We show our default choice of hyperparameter in Table 5 for all the LoRA-based RL experiments." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 173, + 156, + 435, + 501 + ], + "blocks": [ + { + "bbox": [ + 173, + 156, + 435, + 501 + ], + "lines": [ + { + "bbox": [ + 173, + 156, + 435, + 501 + ], + "spans": [ + { + "bbox": [ + 173, + 156, + 435, + 501 + ], + "type": "table", + "html": "
Tina-STILL-3-1.5B-previewLoRA
Tina-DeepScaleR-1.5B-PreviewLoRA
Tina-Open-RS{X}-{Y}LoRA
Tina-LIMR-{Z}LoRA
Tina-OpenR1LoRA
Tina-OpenThoughtsLoRA
LoRA Modulesquery, key, value, dense
LoRA Rank32
LoRA α128
LoRA Dropout0.05
AlgorithmGRPO
OptimizerAdamW
Optimizer Momentumβ1, β2 = 0.9, 0.999
Learning Rate1e-6
LR SchedulerCosine with Min LR
Warmup Ratio0.1
PrecisionBF16-mixed
Gradient Accumulation Step4
Total Train Batch Size32
Epochs1
Hardware2 × NVIDIA L40S
Max Prompt Length512
Max Completion Length3584
Number of Generation4
Vllm GPU Memory Utilization0.4
Vllm Max Model Length4608
", + "image_path": "8360271f6e05414bddb8422616944a29e0ee5c6272108bd1fee1ef9fbeefeb86.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 517, + 242, + 530 + ], + "lines": [ + { + "bbox": [ + 50, + 517, + 242, + 530 + ], + "spans": [ + { + "bbox": [ + 50, + 517, + 242, + 530 + ], + "type": "text", + "content": "Table 5: Common hyperparameter settings." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 50, + 544, + 561, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 544, + 561, + 586 + ], + "spans": [ + { + "bbox": [ + 50, + 544, + 561, + 586 + ], + "type": "text", + "content": "We also show the varied hyperparameter in Table 6 for all the LoRA-based RL experiments. Particularly, all the reward types including Accuracy, Format, Length, Cosine, Tag Count, Reasoning Steps, Repetition Penalty, are defined and implemented by the OpenR1 code repository.4" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 63, + 710, + 232, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 710, + 232, + 723 + ], + "spans": [ + { + "bbox": [ + 63, + 710, + 232, + 723 + ], + "type": "text", + "content": "4https://github.com/huggingface/open-r1" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 548, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 548, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 548, + 750, + 558, + 759 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + 
], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 137, + 72, + 436, + 719 + ], + "blocks": [ + { + "bbox": [ + 137, + 72, + 436, + 719 + ], + "lines": [ + { + "bbox": [ + 137, + 72, + 436, + 719 + ], + "spans": [ + { + "bbox": [ + 137, + 72, + 436, + 719 + ], + "type": "table", + "html": "
ModelLoRA RankLoRA AlphaLoRA DropoutAlgorithmLearning RateReward TypeReward Weights
Tina-STILL-3-1.5B-preview-----Accuracy, Length2, 1
Tina-DeepScaleR-1.5B-Preview-----Accuracy, Format2, 1
Tina-Open-RS3-----Cosine, Format2, 1
Tina-Open-RS3-DrGRPO---DrGRPO-Cosine, Format2, 1
Tina-Open-RS2-----Accuracy, Format2, 1
Tina-Open-RS1-----Accuracy, Format2, 1
Tina-LIMR-----Accuracy, Format2, 1
Tina-LIMR-5e-6-lr----5e-6Accuracy, Format2, 1
Tina-LIMR-5e-7-lr----5e-7Accuracy, Format2, 1
Tina-LIMR-64-LoRA-rank64256---Accuracy, Format2, 1
Tina-LIMR-16-LoRA-rank1664---Accuracy, Format2, 1
Tina-LIMR-8-LoRA-rank832---Accuracy, Format2, 1
Tina-LIMR-4-LoRA-rank416---Accuracy, Format2, 1
Accuracy, Cosine, Format, Length, Tag Count, Reasoning Steps, Repetition Penalty1, 1, 1, 1, 1, 1
Tina-OpenR1-----Accuracy, Cosine, Format, Length, Tag Count, Reasoning Steps, Repetition Penalty1, 1, 1, 1, 1, 1
Tina-OpenThoughts-----
", + "image_path": "decc081a621463503dc87bdda5aa04774f28a8003bed84c4134f7d368e1c0ad1.jpg" + } + ] + } + ], + "index": 1, + "angle": 270, + "type": "table_body" + }, + { + "bbox": [ + 455, + 257, + 467, + 721 + ], + "lines": [ + { + "bbox": [ + 455, + 257, + 467, + 721 + ], + "spans": [ + { + "bbox": [ + 455, + 257, + 467, + 721 + ], + "type": "text", + "content": "Table 6: Varied hyperparameter settings where “-” means unchanged from the common settings in Table 5." + } + ] + } + ], + "index": 2, + "angle": 270, + "type": "table_footnote" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 237, + 55, + 373, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 55, + 373, + 64 + ], + "spans": [ + { + "bbox": [ + 237, + 55, + 373, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 548, + 751, + 558, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 548, + 751, + 558, + 758 + ], + "spans": [ + { + "bbox": [ + 548, + 751, + 558, + 758 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 86, + 187, + 98 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 86, + 187, + 98 + ], + "spans": [ + { + "bbox": [ + 53, + 86, + 187, + 98 + ], + "type": "text", + "content": "C.2. Evaluation Command" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 107, + 559, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 107, + 559, + 177 + ], + "spans": [ + { + "bbox": [ + 52, + 107, + 559, + 177 + ], + "type": "text", + "content": "The following is the evaluation command we use to combine lighteval and vLLM for performance evaluation on reasoning tasks. 
The MODEL_PATH should be replaced with either the local path or huggingface identifier to the model to be evaluated. TASK should be one of the six reasoning tasks including aime24, aime25, amc23, math_500, gpqa: diamond, and minerva. PATH_TO_OPEN_R1_EVALUATEScript should be the path to the custom evaluate script provided by OpenR1." + } + ] + } + ], + "index": 2 + }, + { + "type": "code", + "bbox": [ + 52, + 195, + 583, + 277 + ], + "blocks": [ + { + "bbox": [ + 52, + 195, + 583, + 277 + ], + "lines": [ + { + "bbox": [ + 52, + 195, + 583, + 277 + ], + "spans": [ + { + "bbox": [ + 52, + 195, + 583, + 277 + ], + "type": "text", + "content": "MODEL Arguments=\"pretrained=\\(MODEL_PATH, dtype=float16, data_parallel_size=2, max_model_length=32768, gpu_memory Utilization=0.5, generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}\"" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "code", + "guess_lang": "python" + }, + { + "type": "code", + "bbox": [ + 52, + 290, + 375, + 330 + ], + "blocks": [ + { + "bbox": [ + 52, + 290, + 375, + 330 + ], + "lines": [ + { + "bbox": [ + 52, + 290, + 375, + 330 + ], + "spans": [ + { + "bbox": [ + 52, + 290, + 375, + 330 + ], + "type": "text", + "content": "lighteval vllm $MODEL.argS \"custom|$TASK|0|0\"\n--custom-tasks $PATH_TO_OPEN_R1_EVALUATE-script\n--use-chat-template" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "shell" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 235, + 54, + 373, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 54, + 373, + 64 + ], + "spans": [ + { + "bbox": [ + 235, + 54, + 373, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 63, + 710, + 547, + 723 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 710, + 547, + 
723 + ], + "spans": [ + { + "bbox": [ + 63, + 710, + 547, + 723 + ], + "type": "text", + "content": "5https://github.com/huggingface/open-r1/blob/4f5b21e21dec473af9729bce8e084deb16223ae4/src/open_r1/Evaluate.py" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 548, + 751, + 557, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 548, + 751, + 557, + 758 + ], + "spans": [ + { + "bbox": [ + 548, + 751, + 557, + 758 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 83, + 313, + 98 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 83, + 313, + 98 + ], + "spans": [ + { + "bbox": [ + 53, + 83, + 313, + 98 + ], + "type": "text", + "content": "D. Full Tina Model Performance Evaluation" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 110, + 559, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 110, + 559, + 137 + ], + "spans": [ + { + "bbox": [ + 52, + 110, + 559, + 137 + ], + "type": "text", + "content": "In this section, we present all Tina models' detailed evaluation performance during post-training across six reasoning tasks including AIME24/25, AMC23, MATH500, GPQA and Minerva." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 79, + 147, + 531, + 255 + ], + "blocks": [ + { + "bbox": [ + 79, + 147, + 531, + 255 + ], + "lines": [ + { + "bbox": [ + 79, + 147, + 531, + 255 + ], + "spans": [ + { + "bbox": [ + 79, + 147, + 531, + 255 + ], + "type": "table", + "html": "
CHECKPOINT STEPS (3740 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
50030.0013.3375.0083.6035.8632.3545.02
100036.6720.0065.0084.8032.3227.9444.46
150026.6720.0070.0083.8037.3726.8444.11
200036.6730.0077.5084.6033.3326.8448.16
250033.3330.0070.0083.0035.3527.5746.54
300030.0020.0067.5082.6030.8125.7442.78
350030.0026.6767.5082.2032.3226.1044.13
", + "image_path": "489481ae846805813de3af37b2eba7e7c9d549e8a430de5eda6c253bfc5d0a50.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 79, + 293, + 531, + 437 + ], + "blocks": [ + { + "bbox": [ + 52, + 263, + 356, + 275 + ], + "lines": [ + { + "bbox": [ + 52, + 263, + 356, + 275 + ], + "spans": [ + { + "bbox": [ + 52, + 263, + 356, + 275 + ], + "type": "text", + "content": "Table 7: Performance evaluation of Tina-STILL-3-1.5B-preview." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 79, + 293, + 531, + 437 + ], + "lines": [ + { + "bbox": [ + 79, + 293, + 531, + 437 + ], + "spans": [ + { + "bbox": [ + 79, + 293, + 531, + 437 + ], + "type": "table", + "html": "
CHECKPOINT STEPS (5039 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
50030.0023.3367.5082.4039.3931.2545.65
100043.3326.6767.5086.2037.8828.6848.38
150030.0020.0080.0084.8032.8329.4146.17
200020.0026.6757.5080.6029.2924.2639.72
250013.3316.6752.5075.0031.3118.0134.47
300026.6716.6757.5078.6028.7923.1638.57
350023.3323.3362.5080.4031.8224.2640.94
400020.0020.0070.0082.0041.4127.9443.56
450023.3320.0072.5080.8034.8526.4742.99
500020.0026.6775.0080.8033.3329.4144.20
", + "image_path": "1ee0c6d37cc65fe717551ff9bea8909244dc02f1b5139c065c55b0e684f3a275.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 447, + 373, + 459 + ], + "lines": [ + { + "bbox": [ + 52, + 447, + 373, + 459 + ], + "spans": [ + { + "bbox": [ + 52, + 447, + 373, + 459 + ], + "type": "text", + "content": "Table 8: Performance evaluation of Tina-DeepScaleR-1.5B-Preview." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 237, + 55, + 373, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 55, + 373, + 64 + ], + "spans": [ + { + "bbox": [ + 237, + 55, + 373, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 548, + 751, + 558, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 548, + 751, + 558, + 757 + ], + "spans": [ + { + "bbox": [ + 548, + 751, + 558, + 757 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 79, + 110, + 531, + 350 + ], + "blocks": [ + { + "bbox": [ + 79, + 110, + 531, + 350 + ], + "lines": [ + { + "bbox": [ + 79, + 110, + 531, + 350 + ], + "spans": [ + { + "bbox": [ + 79, + 110, + 531, + 350 + ], + "type": "table", + "html": "
CHECKPOINT STEPS (875 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5026.6723.3375.0084.2037.3729.0445.94
10030.0030.0065.0083.0037.3729.7845.86
15036.6716.6765.0084.8027.7827.9443.14
20020.0026.6770.0083.8033.3327.9443.62
25036.6720.0065.0084.6038.3828.3145.49
30033.3326.6770.0085.2030.8130.1546.03
35040.0016.6777.5084.4039.9027.9447.74
40030.0016.6770.0082.8035.8631.2544.43
45036.6726.6770.0085.6033.8432.7247.58
50036.6723.3382.5085.2037.3731.6249.45
55026.6716.6780.0086.0035.3529.7845.75
60030.0026.6770.0084.6037.8829.7846.49
65020.0023.3380.0085.0033.3327.9444.93
70033.3313.3372.5085.0040.4031.9946.09
75033.3323.3375.0083.6031.3127.5745.69
80030.0023.3365.0084.2038.3829.0444.99
85026.6726.6775.0083.8031.8227.9445.32
", + "image_path": "f3bb7eda6f0c5f44a8d6892ff46ddcd98d6c1bdaa0dba782f01b360e1e83989e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 79, + 432, + 531, + 672 + ], + "blocks": [ + { + "bbox": [ + 52, + 359, + 287, + 371 + ], + "lines": [ + { + "bbox": [ + 52, + 359, + 287, + 371 + ], + "spans": [ + { + "bbox": [ + 52, + 359, + 287, + 371 + ], + "type": "text", + "content": "Table 9: Performance evaluation of Tina-0pen-RS3." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 79, + 432, + 531, + 672 + ], + "lines": [ + { + "bbox": [ + 79, + 432, + 531, + 672 + ], + "spans": [ + { + "bbox": [ + 79, + 432, + 531, + 672 + ], + "type": "table", + "html": "
CHECKPOINT STEPS (875 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5033.3323.3377.5084.2038.8929.0447.72
10036.6723.3372.5084.2031.3128.6846.12
15040.0023.3372.5085.8030.3030.5147.07
20026.6723.3370.0083.8039.3929.4145.43
25046.6713.3372.5082.6031.8230.5146.24
30030.0026.6775.0084.0033.3329.0446.34
35033.3320.0075.0084.8037.3728.6846.53
40026.6716.6770.0083.2037.3727.5743.58
45043.3326.6777.5087.0036.3632.7250.60
50020.0023.3367.5084.2033.8429.4143.05
55040.0023.3372.5083.6040.9130.8848.54
60033.3320.0072.5084.2032.8330.8845.62
65033.3323.3357.5083.8034.8530.5143.89
70023.3326.6770.0082.4033.3328.6844.07
75030.0023.3372.5084.2038.8929.0446.33
80030.0026.6775.0084.4032.3229.4146.30
85026.6723.3370.0083.8035.8628.6844.72
", + "image_path": "4ca7537e6fe64a4ec131c6ed4f8cf75a1a629a084b6eeac69db32310ad2ec862.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 681, + 292, + 693 + ], + "lines": [ + { + "bbox": [ + 52, + 681, + 292, + 693 + ], + "spans": [ + { + "bbox": [ + 52, + 681, + 292, + 693 + ], + "type": "text", + "content": "Table 10: Performance evaluation of Tina-0pen-RS2." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 237, + 55, + 373, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 55, + 373, + 64 + ], + "spans": [ + { + "bbox": [ + 237, + 55, + 373, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 548, + 751, + 558, + 757 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 548, + 751, + 558, + 757 + ], + "spans": [ + { + "bbox": [ + 548, + 751, + 558, + 757 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 79, + 91, + 531, + 249 + ], + "blocks": [ + { + "bbox": [ + 79, + 91, + 531, + 249 + ], + "lines": [ + { + "bbox": [ + 79, + 91, + 531, + 249 + ], + "spans": [ + { + "bbox": [ + 79, + 91, + 531, + 249 + ], + "type": "table", + "html": "
CHECKPOINT STEPS (2327 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
40033.3320.0075.0083.8031.8229.7845.62
60030.0030.0077.5084.2034.3431.6247.94
80043.3320.0080.0084.0035.3528.6848.56
100033.3320.0082.5084.4035.8629.7847.64
120036.6720.0067.5084.4037.8830.1546.10
140030.0020.0067.5083.4031.8229.7843.75
160023.3313.3365.0083.4035.8626.8441.29
180026.6720.0075.0084.2034.3427.5744.63
200030.0026.6772.5083.0036.3627.9446.08
220030.0023.3370.0081.4030.8126.4743.67
240030.0023.3367.5081.8030.3027.5743.42
", + "image_path": "b4fc11af2a42cf76a840b1a3932aee83b2dd2f3789d5b131300587c7d4a21f96.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 79, + 291, + 531, + 358 + ], + "blocks": [ + { + "bbox": [ + 52, + 258, + 292, + 270 + ], + "lines": [ + { + "bbox": [ + 52, + 258, + 292, + 270 + ], + "spans": [ + { + "bbox": [ + 52, + 258, + 292, + 270 + ], + "type": "text", + "content": "Table 11: Performance evaluation of Tina-0pen-RS1." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 79, + 291, + 531, + 358 + ], + "lines": [ + { + "bbox": [ + 79, + 291, + 531, + 358 + ], + "spans": [ + { + "bbox": [ + 79, + 291, + 531, + 358 + ], + "type": "table", + "html": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
5020.0026.6767.5085.4037.8830.5144.66
10046.6720.0075.0083.8034.8530.5148.47
15026.6720.0072.5084.0037.3730.1545.12
20033.3330.0062.5083.4029.8030.8844.99
", + "image_path": "5ca376c85eec1315968cb1b29eabeab870f772630b1bec03adc8b5ab0477fc7f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 79, + 400, + 531, + 506 + ], + "blocks": [ + { + "bbox": [ + 52, + 367, + 269, + 378 + ], + "lines": [ + { + "bbox": [ + 52, + 367, + 269, + 378 + ], + "spans": [ + { + "bbox": [ + 52, + 367, + 269, + 378 + ], + "type": "text", + "content": "Table 12: Performance evaluation of Tina-LIMR." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 79, + 400, + 531, + 506 + ], + "lines": [ + { + "bbox": [ + 79, + 400, + 531, + 506 + ], + "spans": [ + { + "bbox": [ + 79, + 400, + 531, + 506 + ], + "type": "table", + "html": "
CHECKPOINT STEPS (11716 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
50030.0020.0077.5085.2033.8430.1546.12
100030.0023.3372.5085.6033.8426.6745.32
150036.6726.6775.0086.8039.9030.5149.26
200026.6723.3367.5083.2029.8031.6243.69
250030.0023.3372.5083.8033.8426.8445.05
300020.0030.0067.5084.6034.3428.3144.13
350036.6723.3367.5083.6031.3125.7444.69
", + "image_path": "259a17837b341a15b4289b19971555d0291ddd0ca62202c001f2e7d7e7a57c66.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 79, + 548, + 531, + 693 + ], + "blocks": [ + { + "bbox": [ + 52, + 514, + 281, + 526 + ], + "lines": [ + { + "bbox": [ + 52, + 514, + 281, + 526 + ], + "spans": [ + { + "bbox": [ + 52, + 514, + 281, + 526 + ], + "type": "text", + "content": "Table 13: Performance evaluation of Tina-0penR1." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 79, + 548, + 531, + 693 + ], + "lines": [ + { + "bbox": [ + 79, + 548, + 531, + 693 + ], + "spans": [ + { + "bbox": [ + 79, + 548, + 531, + 693 + ], + "type": "table", + "html": "
CHECKPOINT STEPS (8259 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
50033.3016.6777.5084.2035.8630.1546.28
100033.3323.3380.0085.2024.7532.7246.56
150030.0023.3370.0086.0037.8829.0446.04
200030.0023.3370.0084.2033.3328.3144.86
250036.6726.6772.5084.8041.4133.0949.19
300026.6723.3375.0083.6034.3432.7245.94
350020.0016.6760.0084.2032.3226.1039.88
400033.3323.3372.5083.6038.3827.9446.51
450030.0020.0065.0085.0033.8426.8443.45
500020.0033.3365.0084.8040.9130.8845.82
", + "image_path": "d67dbf5f5b71c11036cb87eea3eff99ae62340ff38a964f0b90e4e59d337bcbb.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 701, + 315, + 713 + ], + "lines": [ + { + "bbox": [ + 52, + 701, + 315, + 713 + ], + "spans": [ + { + "bbox": [ + 52, + 701, + 315, + 713 + ], + "type": "text", + "content": "Table 14: Performance evaluation of Tina-OpenThoughts." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 373, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 373, + 64 + ], + "spans": [ + { + "bbox": [ + 236, + 54, + 373, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 548, + 751, + 558, + 758 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 548, + 751, + 558, + 758 + ], + "spans": [ + { + "bbox": [ + 548, + 751, + 558, + 758 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 78, + 94, + 532, + 334 + ], + "blocks": [ + { + "bbox": [ + 78, + 94, + 532, + 334 + ], + "lines": [ + { + "bbox": [ + 78, + 94, + 532, + 334 + ], + "spans": [ + { + "bbox": [ + 78, + 94, + 532, + 334 + ], + "type": "table", + "html": "
CHECKPOINT STEPS (875 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5033.3316.6775.0083.8037.3726.8445.50
10016.6720.0070.0083.2033.3326.4741.61
15043.3323.3380.0085.0035.3530.1549.53
20030.0023.3370.0084.0039.9028.6845.99
25033.3330.0065.0083.8034.3428.3145.80
30036.6716.6767.5084.4037.8829.7845.48
35026.6730.0075.0084.0037.8829.7847.22
40036.6723.3372.5084.4032.8327.5746.22
45036.6716.6772.5085.6029.2927.5744.72
50030.0020.0072.5085.6037.3729.4145.81
55030.0023.3377.5084.8036.8731.6247.35
60033.3326.6772.5083.8030.3028.3145.82
65026.6720.0077.5082.4037.8827.9445.40
70036.6720.0080.0083.8035.3531.2547.85
75030.0026.6775.0084.2038.8927.5747.06
80020.0030.0075.0082.4035.8628.3145.26
85023.3320.0072.5085.4036.3630.1544.62
", + "image_path": "34438dc604081a53d0f3b2eb9a2446c6116792b8fa9876c28dc8d52561641a4a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 79, + 384, + 531, + 453 + ], + "blocks": [ + { + "bbox": [ + 51, + 342, + 333, + 355 + ], + "lines": [ + { + "bbox": [ + 51, + 342, + 333, + 355 + ], + "spans": [ + { + "bbox": [ + 51, + 342, + 333, + 355 + ], + "type": "text", + "content": "Table 15: Performance evaluation of Tina-0pen-RS3-DrGRPO." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 79, + 384, + 531, + 453 + ], + "lines": [ + { + "bbox": [ + 79, + 384, + 531, + 453 + ], + "spans": [ + { + "bbox": [ + 79, + 384, + 531, + 453 + ], + "type": "table", + "html": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5020.0026.6767.5085.4037.8830.5144.66
10046.6720.0075.0083.8034.8530.5148.47
15026.6720.0072.5084.0037.3730.1545.12
20033.3330.0062.5083.4029.8030.8844.99
", + "image_path": "2f6aefc175134f265b1b159383b902b03d40a9cce69d5e4661266307f817b9d4.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 79, + 502, + 531, + 571 + ], + "blocks": [ + { + "bbox": [ + 51, + 460, + 422, + 473 + ], + "lines": [ + { + "bbox": [ + 51, + 460, + 422, + 473 + ], + "spans": [ + { + "bbox": [ + 51, + 460, + 422, + 473 + ], + "type": "text", + "content": "Table 16: Performance evaluation of Tina-LIMR-5e-6-1r with learning rate 5e-6." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 79, + 502, + 531, + 571 + ], + "lines": [ + { + "bbox": [ + 79, + 502, + 531, + 571 + ], + "spans": [ + { + "bbox": [ + 79, + 502, + 531, + 571 + ], + "type": "table", + "html": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5040.0013.3372.5083.0034.3429.0445.37
10043.3316.6777.5084.6034.8530.5147.91
15030.0023.3372.5086.2037.3730.5146.65
20033.3313.3370.0083.2029.2931.2543.40
", + "image_path": "3bceae34b8cb8c4220f13aedaf6a0d44948a79c1bbfe8ae7d3cc92a35320ed29.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 79, + 620, + 531, + 689 + ], + "blocks": [ + { + "bbox": [ + 51, + 578, + 422, + 591 + ], + "lines": [ + { + "bbox": [ + 51, + 578, + 422, + 591 + ], + "spans": [ + { + "bbox": [ + 51, + 578, + 422, + 591 + ], + "type": "text", + "content": "Table 17: Performance evaluation of Tina-LIMR-5e-7-1r with learning rate 5e-7." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 79, + 620, + 531, + 689 + ], + "lines": [ + { + "bbox": [ + 79, + 620, + 531, + 689 + ], + "spans": [ + { + "bbox": [ + 79, + 620, + 531, + 689 + ], + "type": "table", + "html": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5020.0030.0077.5084.2038.3831.6246.95
10030.0023.3372.5084.6032.3229.7845.42
15036.6720.0070.0083.4031.8230.8845.46
20033.3320.0072.5085.0029.8029.4145.01
", + "image_path": "3f7a8c1ad3cb2877b0af8755abcdcfe54a8790edb795d23763fd06181cd40a01.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 696, + 497, + 709 + ], + "lines": [ + { + "bbox": [ + 51, + 696, + 497, + 709 + ], + "spans": [ + { + "bbox": [ + 51, + 696, + 497, + 709 + ], + "type": "text", + "content": "Table 18: Performance evaluation of Tina-LIMR-64-LoRA-rank with LoRA rank 64 and alpha 512." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 79, + 83, + 531, + 152 + ], + "blocks": [ + { + "bbox": [ + 79, + 83, + 531, + 152 + ], + "lines": [ + { + "bbox": [ + 79, + 83, + 531, + 152 + ], + "spans": [ + { + "bbox": [ + 79, + 83, + 531, + 152 + ], + "type": "table", + "html": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINervaAvg.
5033.3323.3362.5084.2038.8931.2545.58
10043.3333.3370.0083.2035.3528.3148.92
15026.6716.6772.5083.4035.3529.0443.94
20036.6720.0075.0083.0039.3930.5147.43
", + "image_path": "e30c2021c32c397470ab042d61912768d1d764516c4830521a738697490ba23f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 79, + 190, + 531, + 258 + ], + "blocks": [ + { + "bbox": [ + 52, + 160, + 490, + 173 + ], + "lines": [ + { + "bbox": [ + 52, + 160, + 490, + 173 + ], + "spans": [ + { + "bbox": [ + 52, + 160, + 490, + 173 + ], + "type": "text", + "content": "Table 19: Performance evaluation of Tina-LIMR-16-LoRA-rank with LoRA rank 16 and alpha 64." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 79, + 190, + 531, + 258 + ], + "lines": [ + { + "bbox": [ + 79, + 190, + 531, + 258 + ], + "spans": [ + { + "bbox": [ + 79, + 190, + 531, + 258 + ], + "type": "table", + "html": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
5030.0026.6782.5083.8033.8430.5147.89
10026.6716.6772.5084.0036.8729.7844.42
15053.3320.0060.0083.2037.3730.8847.46
20023.3320.0072.5085.4032.8328.6843.86
", + "image_path": "2863c059feaad5bb8d161330289c274b045111a2d87ecc5977c5da81987d3f33.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 79, + 296, + 531, + 364 + ], + "blocks": [ + { + "bbox": [ + 52, + 266, + 478, + 278 + ], + "lines": [ + { + "bbox": [ + 52, + 266, + 478, + 278 + ], + "spans": [ + { + "bbox": [ + 52, + 266, + 478, + 278 + ], + "type": "text", + "content": "Table 20: Performance evaluation of Tina-LIMR-8-LoRA-rank with LoRA rank 8 and alpha 32." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 79, + 296, + 531, + 364 + ], + "lines": [ + { + "bbox": [ + 79, + 296, + 531, + 364 + ], + "spans": [ + { + "bbox": [ + 79, + 296, + 531, + 364 + ], + "type": "table", + "html": "
CHECKPOINT STEPS (174 STEPS PER EPOCH)AIME24AIME25AMC23MATH500GPQAMINERVAAvg.
5030.0023.3365.0085.0035.3529.7844.74
10026.6726.6772.5082.8034.8529.0445.42
15036.6720.0085.0083.8031.8229.047.72
20033.3323.3377.5085.4035.8628.3147.29
", + "image_path": "87ea49257f6941cd6e4541f776c070114d4a35cf437f8e9258c5111a25444c07.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 52, + 372, + 478, + 384 + ], + "lines": [ + { + "bbox": [ + 52, + 372, + 478, + 384 + ], + "spans": [ + { + "bbox": [ + 52, + 372, + 478, + 384 + ], + "type": "text", + "content": "Table 21: Performance evaluation of Tina-LIMR-4-LoRA-rank with LoRA rank 4 and alpha 16." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 373, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 373, + 64 + ], + "spans": [ + { + "bbox": [ + 236, + 54, + 373, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 548, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 548, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 548, + 750, + 558, + 759 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 83, + 326, + 100 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 83, + 326, + 100 + ], + "spans": [ + { + "bbox": [ + 51, + 83, + 326, + 100 + ], + "type": "text", + "content": "E. Full Tina Model Training Phase Transition" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 110, + 561, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 110, + 561, + 220 + ], + "spans": [ + { + "bbox": [ + 49, + 110, + 561, + 220 + ], + "type": "text", + "content": "In this section, we present all Tina models' training phase transitions along the training dynamics. 
Specifically, we observe clear phase transitions in the training of Tina-DeepScaleR-1.5B-Preview, Tina-STILL-3-1.5B-preview, Tina-Open-RS1, Tina-Open-RS2, Tina-Open-RS3, and Tina-Open-RS3-GRPO, as shown in Figures 5, 6, and 7. For Tina-OpenR1 and Tina-Thoughts (Figures 8 and 9), the observation is similar, except the best-performing checkpoint is achieved after the training turning point, rather than before. However, we do not observe such a transition in all Tina variants on the LIMR dataset, as shown in Figures 10, 11, and 12, possibly because its small data size leads to training periods which are too brief to extract meaningful information." + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 76, + 230, + 303, + 360 + ], + "blocks": [ + { + "bbox": [ + 76, + 230, + 303, + 360 + ], + "lines": [ + { + "bbox": [ + 76, + 230, + 303, + 360 + ], + "spans": [ + { + "bbox": [ + 76, + 230, + 303, + 360 + ], + "type": "image", + "image_path": "d353ef6fc3f55dfc1422f1740441c1355ae02f826e634eac7f3afe7a8106e2b5.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 307, + 230, + 533, + 360 + ], + "blocks": [ + { + "bbox": [ + 307, + 230, + 533, + 360 + ], + "lines": [ + { + "bbox": [ + 307, + 230, + 533, + 360 + ], + "spans": [ + { + "bbox": [ + 307, + 230, + 533, + 360 + ], + "type": "image", + "image_path": "e227c2e22316cb4b9419f97f054e7fabe4740818619f91e7c8b4fe40c3152bac.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 76, + 363, + 302, + 495 + ], + "blocks": [ + { + "bbox": [ + 76, + 363, + 302, + 495 + ], + "lines": [ + { + "bbox": [ + 76, + 363, + 302, + 495 + ], + "spans": [ + { + "bbox": [ + 76, + 363, + 302, + 495 + ], + "type": "image", + "image_path": "0e2732e82b9d5d73f809168cfbc974b98f9f0044648299abe8a4b63ae8b60533.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": 
"image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 307, + 363, + 533, + 495 + ], + "blocks": [ + { + "bbox": [ + 307, + 363, + 533, + 495 + ], + "lines": [ + { + "bbox": [ + 307, + 363, + 533, + 495 + ], + "spans": [ + { + "bbox": [ + 307, + 363, + 533, + 495 + ], + "type": "image", + "image_path": "94c0a15fc368a7728149a7406671dbf227cdd83e77da83147f99fd9a06986856.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 77, + 497, + 302, + 628 + ], + "blocks": [ + { + "bbox": [ + 77, + 497, + 302, + 628 + ], + "lines": [ + { + "bbox": [ + 77, + 497, + 302, + 628 + ], + "spans": [ + { + "bbox": [ + 77, + 497, + 302, + 628 + ], + "type": "image", + "image_path": "14dd99703ddfbd21f42c03322ea1708b90ea5a70843aba660bc25ee705707508.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 638, + 560, + 664 + ], + "lines": [ + { + "bbox": [ + 50, + 638, + 560, + 664 + ], + "spans": [ + { + "bbox": [ + 50, + 638, + 560, + 664 + ], + "type": "text", + "content": "Figure 5: Phase transition in Tina-DeepScaleR-1.5B-Preview and Tina-STILL-3-1.5B-Preview. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 307, + 497, + 533, + 628 + ], + "blocks": [ + { + "bbox": [ + 307, + 497, + 533, + 628 + ], + "lines": [ + { + "bbox": [ + 307, + 497, + 533, + 628 + ], + "spans": [ + { + "bbox": [ + 307, + 497, + 533, + 628 + ], + "type": "image", + "image_path": "0b36de5f11dfc263cdad476fb93fe133556cacd1abefa32b7b6253470e317fec.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 76, + 186, + 301, + 316 + ], + "blocks": [ + { + "bbox": [ + 76, + 186, + 301, + 316 + ], + "lines": [ + { + "bbox": [ + 76, + 186, + 301, + 316 + ], + "spans": [ + { + "bbox": [ + 76, + 186, + 301, + 316 + ], + "type": "image", + "image_path": "bc4af8c268796a19b9e7ca41828353aa9b004373143e6b9529eeeb8ed4804fab.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 307, + 186, + 533, + 316 + ], + "blocks": [ + { + "bbox": [ + 307, + 186, + 533, + 316 + ], + "lines": [ + { + "bbox": [ + 307, + 186, + 533, + 316 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 533, + 316 + ], + "type": "image", + "image_path": 
"b191a5bf97faa7c45c1fee1dc686ecc9bfe74d6f70924690bd7ccbbb418b18d1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 77, + 319, + 301, + 449 + ], + "blocks": [ + { + "bbox": [ + 77, + 319, + 301, + 449 + ], + "lines": [ + { + "bbox": [ + 77, + 319, + 301, + 449 + ], + "spans": [ + { + "bbox": [ + 77, + 319, + 301, + 449 + ], + "type": "image", + "image_path": "473a1311c96974b772b1c964513df3b56b622e527385e32bcac20487ab0f608b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 307, + 319, + 533, + 449 + ], + "blocks": [ + { + "bbox": [ + 307, + 319, + 533, + 449 + ], + "lines": [ + { + "bbox": [ + 307, + 319, + 533, + 449 + ], + "spans": [ + { + "bbox": [ + 307, + 319, + 533, + 449 + ], + "type": "image", + "image_path": "125d835e1868d60ed3c9237be1434952fbe0d18bf4c96c75bd2bcb9459f8c2c9.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 77, + 453, + 301, + 583 + ], + "blocks": [ + { + "bbox": [ + 77, + 453, + 301, + 583 + ], + "lines": [ + { + "bbox": [ + 77, + 453, + 301, + 583 + ], + "spans": [ + { + "bbox": [ + 77, + 453, + 301, + 583 + ], + "type": "image", + "image_path": "cb82a48a302ec32484b525d394a3919acf1c3e6f8775d6f31572654513eb4bf8.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 594, + 560, + 620 + ], + "lines": [ + { + "bbox": [ + 50, + 594, + 560, + 620 + ], + "spans": [ + { + "bbox": [ + 50, + 594, + 560, + 620 + ], + "type": "text", + "content": "Figure 6: Phase transition in Tina-0pen-RS1 and Tina-0pen-RS2. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 307, + 453, + 533, + 583 + ], + "blocks": [ + { + "bbox": [ + 307, + 453, + 533, + 583 + ], + "lines": [ + { + "bbox": [ + 307, + 453, + 533, + 583 + ], + "spans": [ + { + "bbox": [ + 307, + 453, + 533, + 583 + ], + "type": "image", + "image_path": "00890f19ec10de6d30defeffa76f84c3755edb43396146fd6ecd1a3608db87a5.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 76, + 186, + 302, + 316 + ], + "blocks": [ + { + "bbox": [ + 76, + 186, + 302, + 316 + ], + "lines": [ + { + "bbox": [ + 76, + 186, + 302, + 316 + ], + "spans": [ + { + "bbox": [ + 76, + 186, + 302, + 316 + ], + "type": "image", + "image_path": "552fe327e350e53b6edd0dae81e7b1d91bfa404f3cf9e8181fcb5171edcfe6e2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 307, + 186, + 533, + 316 + ], + "blocks": [ + { + "bbox": [ + 307, + 186, + 533, + 316 + ], + "lines": [ + { + "bbox": [ + 307, + 186, + 533, + 316 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 533, + 316 + ], + "type": "image", + "image_path": 
"6860139cead7f0972829f859c4daa9a36307284fcbfceeda15ecec531e559f3c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 77, + 319, + 302, + 449 + ], + "blocks": [ + { + "bbox": [ + 77, + 319, + 302, + 449 + ], + "lines": [ + { + "bbox": [ + 77, + 319, + 302, + 449 + ], + "spans": [ + { + "bbox": [ + 77, + 319, + 302, + 449 + ], + "type": "image", + "image_path": "496fcd61ed8d480b0945f217eb5a6f72daca5483eeed7daf40f65de2b5759fde.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 307, + 319, + 533, + 449 + ], + "blocks": [ + { + "bbox": [ + 307, + 319, + 533, + 449 + ], + "lines": [ + { + "bbox": [ + 307, + 319, + 533, + 449 + ], + "spans": [ + { + "bbox": [ + 307, + 319, + 533, + 449 + ], + "type": "image", + "image_path": "da84d29ddbc030bb2732b675abae652384353c1ddbd7c6ca78bb6eac68830c8a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 77, + 453, + 302, + 583 + ], + "blocks": [ + { + "bbox": [ + 77, + 453, + 302, + 583 + ], + "lines": [ + { + "bbox": [ + 77, + 453, + 302, + 583 + ], + "spans": [ + { + "bbox": [ + 77, + 453, + 302, + 583 + ], + "type": "image", + "image_path": "2ada93971be1b948ee6a2f2f11c7befda25eb181ff27c2b09831de984ff64ceb.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 594, + 560, + 620 + ], + "lines": [ + { + "bbox": [ + 50, + 594, + 560, + 620 + ], + "spans": [ + { + "bbox": [ + 50, + 594, + 560, + 620 + ], + "type": "text", + "content": "Figure 7: Phase transition in Tina-Open-RS3 and Tina-Open-RS3-GRPO. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 307, + 453, + 533, + 583 + ], + "blocks": [ + { + "bbox": [ + 307, + 453, + 533, + 583 + ], + "lines": [ + { + "bbox": [ + 307, + 453, + 533, + 583 + ], + "spans": [ + { + "bbox": [ + 307, + 453, + 533, + 583 + ], + "type": "image", + "image_path": "6cab3827b7f7c4ceceade4215a0f8483d0bde3adef8952c5ce5f9ed175c0d13d.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 78, + 119, + 302, + 249 + ], + "blocks": [ + { + "bbox": [ + 78, + 119, + 302, + 249 + ], + "lines": [ + { + "bbox": [ + 78, + 119, + 302, + 249 + ], + "spans": [ + { + "bbox": [ + 78, + 119, + 302, + 249 + ], + "type": "image", + "image_path": "9cea77735d3cf6c3ebdecad0073a818a5f4f1cca13aa643d18fce481c4540429.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 307, + 119, + 533, + 249 + ], + "blocks": [ + { + "bbox": [ + 307, + 119, + 533, + 249 + ], + "lines": [ + { + "bbox": [ + 307, + 119, + 533, + 249 + ], + "spans": [ + { + "bbox": [ + 307, + 119, + 533, + 249 + ], + "type": "image", + "image_path": 
"a288133d71b7c9dc147e7ee5f78170f5e2d79b32a4c40cea023285708958f07c.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 78, + 253, + 302, + 383 + ], + "blocks": [ + { + "bbox": [ + 78, + 253, + 302, + 383 + ], + "lines": [ + { + "bbox": [ + 78, + 253, + 302, + 383 + ], + "spans": [ + { + "bbox": [ + 78, + 253, + 302, + 383 + ], + "type": "image", + "image_path": "4ccbe6938b7d744c676eb276e92dafd3e742f1a5f1bd949cac46ae8d3ac3be6e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 308, + 253, + 533, + 383 + ], + "blocks": [ + { + "bbox": [ + 308, + 253, + 533, + 383 + ], + "lines": [ + { + "bbox": [ + 308, + 253, + 533, + 383 + ], + "spans": [ + { + "bbox": [ + 308, + 253, + 533, + 383 + ], + "type": "image", + "image_path": "bf53e4cf04bfe1cc8a3830d64c251087ca403806e1f7125338cfdeb5cdd2d04b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 78, + 387, + 302, + 516 + ], + "blocks": [ + { + "bbox": [ + 78, + 387, + 302, + 516 + ], + "lines": [ + { + "bbox": [ + 78, + 387, + 302, + 516 + ], + "spans": [ + { + "bbox": [ + 78, + 387, + 302, + 516 + ], + "type": "image", + "image_path": "38be411b859adab5f6a4a1d0e537338c225d76824738d7c50323bcdde5094d26.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 308, + 387, + 533, + 516 + ], + "blocks": [ + { + "bbox": [ + 308, + 387, + 533, + 516 + ], + "lines": [ + { + "bbox": [ + 308, + 387, + 533, + 516 + ], + "spans": [ + { + "bbox": [ + 308, + 387, + 533, + 516 + ], + "type": "image", + "image_path": "35c5b458c2182b9b490a45b3816e4fc2f50b5128398cf5a2ca7d6ddf42470a1b.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": 
[ + 78, + 520, + 302, + 650 + ], + "blocks": [ + { + "bbox": [ + 78, + 520, + 302, + 650 + ], + "lines": [ + { + "bbox": [ + 78, + 520, + 302, + 650 + ], + "spans": [ + { + "bbox": [ + 78, + 520, + 302, + 650 + ], + "type": "image", + "image_path": "1f8ce392dcce7770903d3307c0a3c06e266cc11aacf0341e740d2ab6a0d0da40.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 661, + 560, + 685 + ], + "lines": [ + { + "bbox": [ + 50, + 661, + 560, + 685 + ], + "spans": [ + { + "bbox": [ + 50, + 661, + 560, + 685 + ], + "type": "text", + "content": "Figure 8: Phase transition in Tina-0penR1. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 308, + 520, + 533, + 650 + ], + "blocks": [ + { + "bbox": [ + 308, + 520, + 533, + 650 + ], + "lines": [ + { + "bbox": [ + 308, + 520, + 533, + 650 + ], + "spans": [ + { + "bbox": [ + 308, + 520, + 533, + 650 + ], + "type": "image", + "image_path": "d77248ceec0597e3173ad7abb2455e9c6b9bd88d21d80b0ff7244825d3df6bc0.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + 
"para_blocks": [ + { + "type": "image", + "bbox": [ + 78, + 119, + 302, + 249 + ], + "blocks": [ + { + "bbox": [ + 78, + 119, + 302, + 249 + ], + "lines": [ + { + "bbox": [ + 78, + 119, + 302, + 249 + ], + "spans": [ + { + "bbox": [ + 78, + 119, + 302, + 249 + ], + "type": "image", + "image_path": "48735e2ffbf120d3300187ac8dc036e9eb73a17de816b992af60c8f0bba9dee0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 307, + 119, + 533, + 249 + ], + "blocks": [ + { + "bbox": [ + 307, + 119, + 533, + 249 + ], + "lines": [ + { + "bbox": [ + 307, + 119, + 533, + 249 + ], + "spans": [ + { + "bbox": [ + 307, + 119, + 533, + 249 + ], + "type": "image", + "image_path": "bc9a0f1c786c904a3f69ed882f4cf22f1a6a30c05c7bf904d4badae2ee5de727.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 77, + 253, + 302, + 383 + ], + "blocks": [ + { + "bbox": [ + 77, + 253, + 302, + 383 + ], + "lines": [ + { + "bbox": [ + 77, + 253, + 302, + 383 + ], + "spans": [ + { + "bbox": [ + 77, + 253, + 302, + 383 + ], + "type": "image", + "image_path": "7687d000e20a32466588a78df3334c316590d1f20e9a7d47bf25e95f2358b2fc.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 308, + 253, + 533, + 383 + ], + "blocks": [ + { + "bbox": [ + 308, + 253, + 533, + 383 + ], + "lines": [ + { + "bbox": [ + 308, + 253, + 533, + 383 + ], + "spans": [ + { + "bbox": [ + 308, + 253, + 533, + 383 + ], + "type": "image", + "image_path": "e12f87e58a46f826adb346894e3dc14c83b821d5f0533d83abf938406e0ec4c7.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 77, + 387, + 302, + 517 + ], + "blocks": [ + { + "bbox": [ + 77, + 387, + 302, + 517 + ], + "lines": [ + { + "bbox": [ + 77, + 387, + 302, + 517 
+ ], + "spans": [ + { + "bbox": [ + 77, + 387, + 302, + 517 + ], + "type": "image", + "image_path": "30264a3080c01d7d363c382e51e0d8e0b3db806e01af37dd0be555d1b1175270.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 307, + 387, + 533, + 517 + ], + "blocks": [ + { + "bbox": [ + 307, + 387, + 533, + 517 + ], + "lines": [ + { + "bbox": [ + 307, + 387, + 533, + 517 + ], + "spans": [ + { + "bbox": [ + 307, + 387, + 533, + 517 + ], + "type": "image", + "image_path": "93c8ae4fc2db00f11f3c86a37f454a2dd65aef08a5cee4ddac7578f774212764.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 77, + 520, + 302, + 650 + ], + "blocks": [ + { + "bbox": [ + 77, + 520, + 302, + 650 + ], + "lines": [ + { + "bbox": [ + 77, + 520, + 302, + 650 + ], + "spans": [ + { + "bbox": [ + 77, + 520, + 302, + 650 + ], + "type": "image", + "image_path": "d0a6b65304f02a110defa4ef71b15594342b223bbbf05dc73dd44d06fd2fb35c.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 661, + 560, + 685 + ], + "lines": [ + { + "bbox": [ + 50, + 661, + 560, + 685 + ], + "spans": [ + { + "bbox": [ + 50, + 661, + 560, + 685 + ], + "type": "text", + "content": "Figure 9: Phase transition in Tina-OpenThoughts. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 307, + 520, + 533, + 650 + ], + "blocks": [ + { + "bbox": [ + 307, + 520, + 533, + 650 + ], + "lines": [ + { + "bbox": [ + 307, + 520, + 533, + 650 + ], + "spans": [ + { + "bbox": [ + 307, + 520, + 533, + 650 + ], + "type": "image", + "image_path": "1f769eabe9d3b95792ee514dca322f1a413030ec9276fa2fa355fb3eeeebcc94.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 547, + 750, + 557, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 547, + 750, + 557, + 759 + ], + "spans": [ + { + "bbox": [ + 547, + 750, + 557, + 759 + ], + "type": "text", + "content": "31" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 237, + 220, + 330 + ], + "blocks": [ + { + "bbox": [ + 60, + 237, + 220, + 330 + ], + "lines": [ + { + "bbox": [ + 60, + 237, + 220, + 330 + ], + "spans": [ + { + "bbox": [ + 60, + 237, + 220, + 330 + ], + "type": "image", + "image_path": "6863227a9bb5aee1c96f10a6063b1ed571d1bf23545e337ee8f67927af9938c9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 224, + 237, + 385, + 330 + ], + "blocks": [ + { + "bbox": [ + 224, + 237, + 385, + 330 + ], + "lines": [ + { + "bbox": [ + 224, + 237, + 385, + 330 + ], + "spans": [ + { + "bbox": [ + 224, + 237, + 385, + 330 + ], + "type": "image", + "image_path": 
"8e7d37997e4454107c9b3f1d5d83cd73d2c4e31cb6d40d54edf747a32eeab3c1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 389, + 237, + 550, + 331 + ], + "blocks": [ + { + "bbox": [ + 389, + 237, + 550, + 331 + ], + "lines": [ + { + "bbox": [ + 389, + 237, + 550, + 331 + ], + "spans": [ + { + "bbox": [ + 389, + 237, + 550, + 331 + ], + "type": "image", + "image_path": "e10e05106fdbb6069713e797be132f798dd8dd77c486e0e23785c982fa11ca63.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 60, + 332, + 221, + 426 + ], + "blocks": [ + { + "bbox": [ + 60, + 332, + 221, + 426 + ], + "lines": [ + { + "bbox": [ + 60, + 332, + 221, + 426 + ], + "spans": [ + { + "bbox": [ + 60, + 332, + 221, + 426 + ], + "type": "image", + "image_path": "f82684fd818799f06d8422102e17ce5a54eb80cd17d33223ef24703ede35a673.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 224, + 332, + 385, + 426 + ], + "blocks": [ + { + "bbox": [ + 224, + 332, + 385, + 426 + ], + "lines": [ + { + "bbox": [ + 224, + 332, + 385, + 426 + ], + "spans": [ + { + "bbox": [ + 224, + 332, + 385, + 426 + ], + "type": "image", + "image_path": "94b7e9fa1ffeecc96b0ef41ea51000b26d02f97e7c1e3593294689e973f839ad.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 389, + 332, + 550, + 426 + ], + "blocks": [ + { + "bbox": [ + 389, + 332, + 550, + 426 + ], + "lines": [ + { + "bbox": [ + 389, + 332, + 550, + 426 + ], + "spans": [ + { + "bbox": [ + 389, + 332, + 550, + 426 + ], + "type": "image", + "image_path": "e26a69ce1c5d5ce240e9dc3cff8e42ad98280111c18653b9dbec50810ca60eca.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + 
"bbox": [ + 59, + 427, + 221, + 521 + ], + "blocks": [ + { + "bbox": [ + 59, + 427, + 221, + 521 + ], + "lines": [ + { + "bbox": [ + 59, + 427, + 221, + 521 + ], + "spans": [ + { + "bbox": [ + 59, + 427, + 221, + 521 + ], + "type": "image", + "image_path": "5a9ddd7c6ab7cbde615412641ceea6c25f0f3366649e5f32dd25392dc06b6c70.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 531, + 560, + 566 + ], + "lines": [ + { + "bbox": [ + 50, + 531, + 560, + 566 + ], + "spans": [ + { + "bbox": [ + 50, + 531, + 560, + 566 + ], + "type": "text", + "content": "Figure 10: Phase transition in Tina-LIMR, Tina-LIMR-64-LoRA-rank and Tina-LIMR-16-LoRA-rank. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 224, + 427, + 385, + 521 + ], + "blocks": [ + { + "bbox": [ + 224, + 427, + 385, + 521 + ], + "lines": [ + { + "bbox": [ + 224, + 427, + 385, + 521 + ], + "spans": [ + { + "bbox": [ + 224, + 427, + 385, + 521 + ], + "type": "image", + "image_path": "ed65f9fd40831f8b3283fbd88857fbde1efe7a1a747af3aad5020ea0ab07ecfb.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 389, + 427, + 550, + 521 + ], + "blocks": [ + { + "bbox": [ + 389, + 427, + 550, + 521 + ], + "lines": [ + { + "bbox": [ + 389, + 427, + 550, + 521 + ], + "spans": [ + { + "bbox": [ + 389, + 427, + 550, + 521 + ], + "type": "image", + "image_path": "04e83e21598d670a72734bc1836dedfcb3fa2769f649de2a33509249caabd40a.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "spans": [ 
+ { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 78, + 186, + 302, + 316 + ], + "blocks": [ + { + "bbox": [ + 78, + 186, + 302, + 316 + ], + "lines": [ + { + "bbox": [ + 78, + 186, + 302, + 316 + ], + "spans": [ + { + "bbox": [ + 78, + 186, + 302, + 316 + ], + "type": "image", + "image_path": "919f8c76c34c77a7b4d22d069a3190f1356853252c2e8e191e1d7805afc3a619.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 307, + 186, + 533, + 316 + ], + "blocks": [ + { + "bbox": [ + 307, + 186, + 533, + 316 + ], + "lines": [ + { + "bbox": [ + 307, + 186, + 533, + 316 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 533, + 316 + ], + "type": "image", + "image_path": "680477ed58ba6ca295f1e70b13709cb218a8a1b3b052246ddbe20f98c2db2562.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 77, + 319, + 302, + 449 + ], + "blocks": [ + { + "bbox": [ + 77, + 319, + 302, + 449 + ], + "lines": [ + { + "bbox": [ + 77, + 319, + 302, + 449 + ], + "spans": [ + { + "bbox": [ + 77, + 319, + 302, + 449 + ], + "type": "image", + "image_path": "15737af1ce0d1a20f369f184d8ff8cb4509c833fe136e34db3dc3a6b909812f2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 307, + 319, + 533, + 449 + ], + "blocks": [ + { + "bbox": [ + 307, + 319, + 533, + 449 + ], + 
"lines": [ + { + "bbox": [ + 307, + 319, + 533, + 449 + ], + "spans": [ + { + "bbox": [ + 307, + 319, + 533, + 449 + ], + "type": "image", + "image_path": "7c0cee9452adf0935fd7975a14037e4d33ede03232890ec890460647a16fa1c2.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 77, + 453, + 302, + 583 + ], + "blocks": [ + { + "bbox": [ + 77, + 453, + 302, + 583 + ], + "lines": [ + { + "bbox": [ + 77, + 453, + 302, + 583 + ], + "spans": [ + { + "bbox": [ + 77, + 453, + 302, + 583 + ], + "type": "image", + "image_path": "e293c0018a412234e72b8b60b87a9e3dca0699425d5b9a868e7d6995c7a461f3.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 594, + 560, + 620 + ], + "lines": [ + { + "bbox": [ + 50, + 594, + 560, + 620 + ], + "spans": [ + { + "bbox": [ + 50, + 594, + 560, + 620 + ], + "type": "text", + "content": "Figure 11: Phase transition in Tina-LIMR-8-LoRA-rank and Tina-LIMR-4-LoRA-rank. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 307, + 453, + 533, + 583 + ], + "blocks": [ + { + "bbox": [ + 307, + 453, + 533, + 583 + ], + "lines": [ + { + "bbox": [ + 307, + 453, + 533, + 583 + ], + "spans": [ + { + "bbox": [ + 307, + 453, + 533, + 583 + ], + "type": "image", + "image_path": "bbe93058d5e119281dbe2a2a7453e28bb4b576cf301184abcc57d8ced53d3f62.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 76, + 186, + 303, + 316 + ], + "blocks": [ + { + "bbox": [ + 76, + 186, + 303, + 316 + ], + "lines": [ + { + "bbox": [ + 76, + 186, + 303, + 316 + ], + "spans": [ + { + "bbox": [ + 76, + 186, + 303, + 316 + ], + "type": "image", + "image_path": "f4186b7a62fdd3d663ffed363a38e286da6614aad9564a13f40b27a02c1f824c.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 307, + 186, + 533, + 316 + ], + "blocks": [ + { + "bbox": [ + 307, + 186, + 533, + 316 + ], + "lines": [ + { + "bbox": [ + 307, + 186, + 533, + 316 + ], + "spans": [ + { + "bbox": [ + 307, + 186, + 533, + 316 + ], + "type": "image", + "image_path": 
"40017567513cbf6d1900af9033c2f09bd1c6aef4b039218e1da821aa0c9116ef.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 77, + 319, + 302, + 449 + ], + "blocks": [ + { + "bbox": [ + 77, + 319, + 302, + 449 + ], + "lines": [ + { + "bbox": [ + 77, + 319, + 302, + 449 + ], + "spans": [ + { + "bbox": [ + 77, + 319, + 302, + 449 + ], + "type": "image", + "image_path": "74cbc8332e1e8c7a7c24513fe7fc932848cb5796a13820024616f51ed8f084a3.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 307, + 319, + 533, + 449 + ], + "blocks": [ + { + "bbox": [ + 307, + 319, + 533, + 449 + ], + "lines": [ + { + "bbox": [ + 307, + 319, + 533, + 449 + ], + "spans": [ + { + "bbox": [ + 307, + 319, + 533, + 449 + ], + "type": "image", + "image_path": "47eb8632f9c85c72c92536872469d78675758b2e5cda7141a97c301b03c3f345.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 77, + 453, + 302, + 583 + ], + "blocks": [ + { + "bbox": [ + 77, + 453, + 302, + 583 + ], + "lines": [ + { + "bbox": [ + 77, + 453, + 302, + 583 + ], + "spans": [ + { + "bbox": [ + 77, + 453, + 302, + 583 + ], + "type": "image", + "image_path": "92d66da5abf488c5b52ee99dc185b1dc09d8af4a68e2fbe8d2d7f207a0009495.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 594, + 560, + 620 + ], + "lines": [ + { + "bbox": [ + 50, + 594, + 560, + 620 + ], + "spans": [ + { + "bbox": [ + 50, + 594, + 560, + 620 + ], + "type": "text", + "content": "Figure 12: Phase transition in Tina-LIMR-5e-6-1r and Tina-LIMR-5e-7-1r. The raw data is from the Weights & Biases training logs and smoothed via exponential moving average (EMA) with factor 0.1." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 307, + 453, + 533, + 583 + ], + "blocks": [ + { + "bbox": [ + 307, + 453, + 533, + 583 + ], + "lines": [ + { + "bbox": [ + 307, + 453, + 533, + 583 + ], + "spans": [ + { + "bbox": [ + 307, + 453, + 533, + 583 + ], + "type": "image", + "image_path": "9963c4c73368a08be9b4ba416a12dbe2b4ef4ec1b7263f7133125609784b4ce9.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "spans": [ + { + "bbox": [ + 236, + 54, + 374, + 64 + ], + "type": "text", + "content": "Tina: Tiny Reasoning Models via LoRA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "spans": [ + { + "bbox": [ + 547, + 750, + 558, + 759 + ], + "type": "text", + "content": "34" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15848/c2a6d104-48f7-465b-bad3-e87ed3722daf_content_list.json b/data/2025/2504_15xxx/2504.15848/c2a6d104-48f7-465b-bad3-e87ed3722daf_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..fc2f6aef8f56aa78fe37583600548886afbfa474 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/c2a6d104-48f7-465b-bad3-e87ed3722daf_content_list.json @@ -0,0 +1,3047 @@ +[ + { + "type": "text", + "text": "Exploring Cognitive and Aesthetic Causality for Multimodal Aspect-Based Sentiment Analysis", + "text_level": 1, + "bbox": [ + 73, + 65, + 921, + 136 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Luwei Xiao, Student 
Member, IEEE, Rui Mao*, Member, IEEE, Shuai Zhao, Qika Lin, Yanhao Jia, Liang He, and Erik Cambria, Fellow, IEEE", + "bbox": [ + 194, + 150, + 800, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract—Multimodal aspect-based sentiment classification (MASC) is an emerging task due to an increase in user-generated multimodal content on social platforms, aimed at predicting sentiment polarity toward specific aspect targets (i.e., entities or attributes explicitly mentioned in text-image pairs). Despite extensive efforts and significant achievements in existing MASC, substantial gaps remain in understanding fine-grained visual content and the cognitive rationales derived from semantic content and impressions (cognitive interpretations of emotions evoked by image content). In this study, we present Chimera: a cognitive and aesthetic sentiment causality understanding framework to derive fine-grained holistic features of aspects and infer the fundamental drivers of sentiment expression from both semantic perspectives and affective-cognitive resonance (the synergistic effect between emotional responses and cognitive interpretations). Specifically, this framework first incorporates visual patch features for patch-word alignment. Meanwhile, it extracts coarse-grained visual features (e.g., overall image representation) and fine-grained visual regions (e.g., aspect-related regions) and translates them into corresponding textual descriptions (e.g., facial, aesthetic). Finally, we leverage the sentimental causes and impressions generated by a large language model (LLM) to enhance the model's awareness of sentimental cues evoked by semantic content and affective-cognitive resonance. Experimental results on standard MASC datasets demonstrate the effectiveness of the proposed model, which also exhibits greater flexibility to MASC compared to LLMs such as GPT-4o. 
We have publicly released the complete implementation and dataset at https://github.com/Xillv/Chimera", + "bbox": [ + 104, + 208, + 888, + 393 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Index Terms—Multimodal aspect-based sentiment classification, Sentiment causality, Large language models, Affective-cognitive resonance.", + "bbox": [ + 104, + 404, + 859, + 431 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 73, + 462, + 230, + 478 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "MULTIMODAL aspect-based sentiment classification (MASC) is a valuable task for analyzing user-generated multimodal content on social platforms, aiming to predict the sentiment polarity of a specific target/aspect term within a sentence, based on an image-text pair. In an era marked by growing global interconnectedness, social platforms have become essential channels for individuals to express opinions and share experiences [1]-[3]. These platforms support multimodal content, blending text and visual media, which better reflects how sentiment is conveyed [4]. Consequently, analyzing fine-grained sentiment expression in multimodal scenarios not only improves the depth of sentiment classification but also aligns with the natural manner in which users express opinions and emotions, ultimately supporting more accurate sentiment analysis for applications in finance [5], [6], social research [7], [8], and human-computer interaction [9], [10]. Current methodologies for MASC can be broadly divided into two principal categories: visual-text fusion-based approaches and translation-based approaches. 
Visual-text fusion-based methods address MASC by directly integrating visual content with", + "bbox": [ + 71, + 489, + 491, + 797 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "textual features through various attention-based mechanisms [11]-[16].", + "bbox": [ + 503, + 489, + 921, + 518 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yu et al. [11] were the first to propose the utilization of ResNet for image feature extraction in conjunction with BERT for language sequence modeling, subsequently feeding these components into a BERT encoder to facilitate the interactive modeling of cross-modal representations. Ling et al. [14] introduced a vision-language pre-training framework that leverages Faster R-CNN for extracting object-level visual features and BART for generating textual features, with the model pre-trained using three task-specific strategies targeting the language, vision, respectively. Yu et al. [13] presented a novel multi-task learning framework Image-Target Matching Network (ITM), which concurrently performs coarse-to-fine-grained visual-textual relevance detection and visual object-target alignment through cross-modal Transformers. Translation-based approaches focus on mapping visual content into the language space as auxiliary textual representations, leveraging this supplementary information, or integrating it with visual features to enhance MASC [17]-[22]. Khan et al. [17] translated the image into a corresponding caption, which is then jointly input with the sentence into BERT to predict the sentiment polarity associated with specific targets. Yang et al. [23] exploit a face-sensitive, translation-based approach that translates facial expressions in images into textual sentiment cues, which are then selectively aligned and fused with the targets for enhanced sentiment analysis. Xiao et al. 
[19] proposed the CoolNet framework, which generates visual captions for images and extracts syntactic and semantic features from the textual", + "bbox": [ + 501, + 518, + 924, + 944 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025", + "bbox": [ + 73, + 31, + 517, + 42 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Luwei Xiao, and Liang He are with the School of Computer Science and Technology, East China Normal University, Shanghai 200062, China. E-mail: louisshaw@stu.ecnu.edu.cn, lhe@cs.ecnu.edu.cn", + "bbox": [ + 71, + 820, + 491, + 854 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "- Rui Mao, Shuai Zhao, Yanhao Jia and Erik Cambria are with the College of Computing and Data Science, Nanyang Technological University, Singapore 639798. E-mail:{rui.mao, shuai.zhao, cambria}@ntu.edu.sg, yanhao002@e.ntu.edu.sg", + "bbox": [ + 71, + 854, + 491, + 901 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Qika Lin is with the Saw Swee Hock School of Public Health, National University of Singapore 119077. E-mail: linqika@nus.edu.sg", + "bbox": [ + 73, + 901, + 491, + 925 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Corresponding author: Rui Mao", + "bbox": [ + 75, + 928, + 259, + 941 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.15848v1 [cs.CL] 22 Apr 2025", + "bbox": [ + 22, + 263, + 60, + 708 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "modality, subsequently fusing these with visual features through a cross-modal Transformer.", + "bbox": [ + 73, + 53, + 491, + 82 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Despite substantial efforts and promising advancements, current solutions continue to encounter the following challenges. 
First, excessive duplicative visual patches can overshadow critical visual clues relevant to the specific target, leading to considerable misalignment during patch-token interactions. These small visual patches often lack semantic coherence compared to complete visual regions, particularly when aligning targets with their corresponding objects in an image, potentially leading to ambiguous semantic representations. Second, limited studies have focused on the underlying rationale behind sentiment cues, particularly from the perspectives of semantic content and affective-cognitive resonance. Owing to the multimodal nature of Twitter content, which spans diverse facets of daily life, inferring the sentiment associated with specific targets necessitates not only an understanding of the surface-level information in text and images (e.g., facial expressions) but also an in-depth comprehension of the contextual background of particular events and the impressions evoked by the image's content and aesthetic attributes.", + "bbox": [ + 71, + 97, + 491, + 387 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address the aforementioned challenges, this paper proposes Chimera: a cognitive and aesthetic sentiment causality understanding framework. This framework aims to incorporate and align fine-grained features of specific targets and reasons about semantic and impression rationales. However, two critical issues must be resolved to achieve these objectives: 1) How can specific targets in a sentence be aligned with their corresponding object-level fine-grained features in an image? 2) How can the model be enabled to reason about the emotional causal reasons within the semantic content of image-text pairs and the affective resonance evoked by image aesthetic attributes? 
For the first question, we propose to make the cross-modal alignment of the target via the visual patch-level by linguistic-aware patch-token alignment and object-level by accurately translating the object feature into language space. Regarding the second issue, while a recent study [24] developed a reasoning dataset for MASC, this dataset primarily explains the emotional causes within textual content and lacks reasoning capabilities for visual content and the affective resonance evoked by images, limiting its suitability for the multimodal nature of this task. Consequently, we employ a large language model (LLM), GPT-4o, to generate the semantic rationale and impression rationale to understand the causal foundations of emotions.", + "bbox": [ + 71, + 388, + 491, + 751 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Specifically, our proposed framework first extracts visual patch-level and textual features, feeding them into a tailored linguistic-aware patch-token alignment (LPA) module to achieve patch-token alignment. Concurrently, a translation module (TM) translates the holistic image or object-level content into aesthetic captions or facial descriptions, leveraging multimodal named entity annotations from the work of Wang et al. [25]. The TM-generated text, along with the sentence and aspect, is then input into a generative module for multi-task learning to produce sentiment polarity, semantic rationale (SR), and impression rationale (IR). 
By bootstrapping the model's perception of underlying rationale through an in-depth understanding of textual and", + "bbox": [ + 71, + 752, + 491, + 941 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "visual content as well as the affective resonance evoked by images, it enhances the performance of sentiment classification.", + "bbox": [ + 503, + 53, + 921, + 95 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In a nutshell, the primary contributions are as follows:", + "bbox": [ + 527, + 95, + 910, + 111 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a novel framework for MASC that aligns specific targets with their corresponding visual objects at the patch-token and object levels while equipping the model with causal rationale reasoning ability for semantic rationale (SR), and impression rationale (IR).", + "- We approach this task by enabling the model to grasp the semantic content of image-text pairs and the affective resonance evoked by images. To our knowledge, we are the first to collect semantic and impression rationale data for the MASC task, based on existing MASC datasets, extending its content to incorporate semantic and impression rationale, offering a valuable resource for advancing MASC research.", + "- Experiments on three widely-used Twitter benchmarks demonstrate that our proposed method outperforms previous approaches, achieving state-of-the-art performance. Further evaluations validate the effectiveness of our approach for MASC tasks." + ], + "bbox": [ + 527, + 117, + 921, + 409 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The remainder of this paper is organized as follows: Section 2 provides an overview of related research on multimodal aspect-based sentiment classification, image aesthetic assessment, and multimodal learning. 
Section 3 details the proposed framework, including linguistics-aware patchtoken alignment, the translation-based module, causal rationale dataset construction, and LLM-based annotation generation. Main experimental results are presented in Section 4, and the in-depth analysis is shown in 5, followed by conclusions in Section 6.", + "bbox": [ + 503, + 415, + 924, + 561 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 RELATED WORK", + "text_level": 1, + "bbox": [ + 504, + 580, + 671, + 594 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This section reviews key methods in multimodal aspect-based sentiment analysis and image aesthetic assessment. Additionally, as our novel rationale dataset is constructed using an LLM, we introduce LLMs for data annotation.", + "bbox": [ + 503, + 599, + 921, + 659 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 Multimodal Aspect-based Sentiment Analysis", + "text_level": 1, + "bbox": [ + 503, + 675, + 883, + 691 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Sentiment analysis is a well-established research area focused on understanding and identifying human emotions and opinions across various contexts [26]–[31]. With the exponential growth of user-generated multimodal content (e.g., image-text pairs, video clips) on social platforms [32]–[35] has drawn substantial attention to Multimodal Aspect-based Sentiment Analysis (MABSA) [36]–[40]. The MABSA task consists of two sub-tasks: Multimodal Aspect Term Extraction (MATE) and our focused MASC task. MATE [41] is essentially a named entity recognition task aimed at identifying all relevant specific targets within the textual content of an image-text pair. MASC [42], [43] is a text classification task in which specific targets are provided, requiring the identification of their sentiment polarity (positive, neutral, or negative) based on the given image-text pair. 
A series of recent studies have successfully unified these two subtasks into a single framework, effectively", + "bbox": [ + 501, + 694, + 923, + 944 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025", + "bbox": [ + 73, + 31, + 517, + 44 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/c7f16f703165deced537a0b922aa2f80c5c899a274c670eeeceb35f3ea956d98.jpg", + "image_caption": [ + "Fig. 1. The overall framework of the proposed Chimera. Chimera consists of four parts: Translation Module, Rationale Dataset Construction, Linguistic-aware Semantic Alignment, and Rationale-Aware Learning." + ], + "image_footnote": [], + "bbox": [ + 86, + 55, + 911, + 397 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "streamlining the MABSA process [14], [15], [22], [44]–[47]. Among these studies, Yu et al. [12] proposed the Entity-Sensitive Attention and Fusion Network (ESAFN), which employs entity-oriented attention combined with a visual gate mechanism to model entity-sensitive inter-dynamics for MASC. Ju et al. [44] were the first to integrate MATE and MASC into a end-to-end task, developing a joint learning framework with cross-modal relation detection. Kruk et al. [35] proposed a multimodal framework for Instagram intent detection, integrating three taxonomies and the MDID dataset. It demonstrates that text-image fusion enhances accuracy by $9.6\\%$ under semiotic divergence, emphasizing the necessity of multimodal models for capturing the non-intersective \"meaning multiplication\" inherent in social media. Yang et al. 
[15] improved cross-modal alignment modeling through a Transformer-based multi-task learning framework, incorporating text-guided cross-modal interactions and using adjective-noun pairs as supervision for a visual auxiliary task.", + "bbox": [ + 71, + 463, + 491, + 741 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Zhou et al. [16] developed an aspect-oriented multimodal fusion approach that constructs an informative dependency graph to minimize additional visual and textual noise in cross-modal interactions by selectively processing aspect-relevant textual and image features. Huang et al. [20] put forward to mapping images into scene graphs, using triplet semantic relationships among entities along with image captions to construct a relatedness matrix for achieving cross-modal alignment in MASC. More recently, Xiao et al. [22] introduced the Atlantis, a trident-shaped architecture that incorporates aesthetic attributes to enhance the emotional resonance of visual content. Fan et al. [24] devised a Flant5-based multi-task learning architecture to enhance the", + "bbox": [ + 71, + 752, + 491, + 941 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "model's reasoning capabilities for inferring underlying and direct causes of sentiment expressions. 
Additionally, they constructed a practical causal dataset for MASC.", + "bbox": [ + 503, + 463, + 921, + 507 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our proposed method aims to achieve cross-modal alignment at the patch and object levels while equipping the model with reasoning capabilities to discern the semantic and impression rationale underlying sentiment expressions.", + "bbox": [ + 503, + 508, + 921, + 566 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 Image Aesthetic Assessment", + "text_level": 1, + "bbox": [ + 504, + 587, + 764, + 602 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Image aesthetics play a fundamental role in shaping viewers' emotional responses and overall aesthetic experience through complex psychological and cognitive processes [48]. Image aesthetics pertain to the subjective evaluation and appreciation of its beauty [49]. Image Aesthetic Assessment seeks to systematically appraise this aesthetic quality by analyzing the visual appeal of images [50]. Empirical psychological research corroborates that images can trigger a wide range of emotions, which are influenced by their aesthetic attributes and semantic content [51]. Previous research concentrated on aesthetic image captioning and analysis through the aggregation of commentary on aesthetic attributes [52]. These studies address the concepts of style, layout, and aesthetics from the viewpoints of beauty and visual attractiveness. Recent scholarly efforts have focused on encouraging vision-language models to generate visual connotations and captions related to various aesthetic attributes (e.g., color, harmony, lighting, composition) [53]. More recently, Kruk et al. [54] introduced a connotation-rich dataset, Impressions, designed to explore the emotions, thoughts, and beliefs that images evoke, along with the aesthetic elements that elicit these responses. 
The introduction of this dataset marks a significant advance in the study of", + "bbox": [ + 501, + 606, + 923, + 944 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025", + "bbox": [ + 73, + 32, + 517, + 44 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "how visual stimuli can influence complex perceptual and emotional outcomes.", + "bbox": [ + 71, + 53, + 491, + 80 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this study, we utilize aesthetic attributes to capture sentiment cues within visual content at both object and holistic levels. Inspired by Impressions [54], we further prompt the LLM to generate impression rationales for MASC, enabling analysis of the underlying affective resonance evoked by images.", + "bbox": [ + 71, + 82, + 491, + 171 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.3 LLMs-Based Rationale Generation", + "text_level": 1, + "bbox": [ + 73, + 188, + 369, + 203 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Recently, LLMs have achieved significant success across various downstream tasks [55]–[58]. LLMs such as GPT-40 [59], Gemini [60], and LLaMA-2 [61] hold significant potential to usher data annotation into a new era, functioning not merely as auxiliary tools but as vital enhancers of its effectiveness and quality [62], [63]. LLMs can automatically annotate samples, ensure consistency across large data volumes, and adapt to specific domains via fine-tuning, thereby establishing a new standard in deep learning [64]–[66]. The rationale represents the detailed cognitive process an individual typically follows when solving a problem, providing useful supplementary information for the final answer [67]. 
Early studies [68] typically relied on human experts to annotate rationale in datasets, significantly limiting availability and scalability. A bunch of diverse methodologies have been developed to produce high-quality and fine-grained rationale. Wang et al. [69] proposed to elucidate each choice in a sample by generating choice-specific rationales via LLMs. Wang et al. [70] enhanced the credibility of generated rationales by incorporating gold-standard answers and using contrastive decoding algorithms. Liu et al. [71] laid much emphasis on curating high-quality prompts to obtain fine-grained rationales from GPT-4o and build a logical chain-of-thought instruction-tuning dataset. More recently, Kang et al. [72] developed a sophisticated neural reranking mechanism to dynamically retrieve highly relevant supplementary documents for generating high-quality rationales in knowledge-intensive reasoning tasks.", + "bbox": [ + 71, + 207, + 491, + 614 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this paper, we build upon the work of Wang et al. [70] by fully utilizing the dataset's gold-standard annotations to generate semantic and impression rationales through meticulously designed prompts. 
This approach ensures high-quality rationale generation while avoiding additional costs from trial-and-error OpenAI API usage fees.", + "bbox": [ + 71, + 614, + 491, + 705 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 METHODOLOGY", + "text_level": 1, + "bbox": [ + 73, + 724, + 235, + 738 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This section presents our proposed framework for MASC, beginning with the task formalization, followed by the rationale dataset construction process, and concluding with the proposed method, comprising linguistic-aware semantic alignment, a translation module, rationale dataset construction and a rationale-aware learning framework.", + "bbox": [ + 71, + 743, + 491, + 832 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Task Definition", + "text_level": 1, + "bbox": [ + 73, + 849, + 225, + 864 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given a multimodal dataset $M$ , each sample $X_{i}$ consists of an image $V_{i}$ paired with a sentence $S_{i}$ containing one or more specific targets $T_{i}$ . The goal of MASC is to predict the sentiment polarity $Y_{i} \\in \\{\\text{Positive}, \\text{Negative}, \\text{Neutral}\\}$ for a specific target $T_{i}$ . Moreover, our framework infers", + "bbox": [ + 71, + 868, + 491, + 944 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "both semantic rationale $SR_{i}$ and impression rationale $IR_{i}$ , explaining the sentiment prediction $Y_{i}$ for a specific target $T_{i}$ , based on multimodal semantic meaning and the affective resonance evoked by the image. 
In this study, the model outputs $SR_{i}, IR_{i}, Y_{i}$ for an input sample $X_{i} = (S_{i}, V_{i}, T_{i})$ , where $SR_{i}$ and $IR_{i}$ offer supplementary sentimental cues for sentiment prediction $T_{i}$ .", + "bbox": [ + 501, + 53, + 924, + 157 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 Method Overview", + "text_level": 1, + "bbox": [ + 504, + 172, + 676, + 186 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As shown in Figure 1, our proposed framework comprises four technical components, namely a Translation Module, Rationale Dataset Construction, Linguistic-aware Semantic Alignment, and Rationale-Aware Learning. The Translation Module converts visual content, both holistic and object-level, into language captions. For entire images, it generates emotion-laden aesthetic captions using our fine-tuned BLIP. For object-level content, it maps visuals to facial descriptions or aesthetic captions with rich emotional cues via EmoLA or our fine-tuned BLIP. The construction of the rationale dataset involves generating semantic and impression rationales. We curate prompts tailored to each rationale category and input them, along with the samples, into GPT-4o to collect the desired rationales. The Linguistic-aware Semantic Alignment module segments the input image into patches, dynamically selects and refines relevant visual patches, and achieves patch-token alignment guided by linguistic features from the input sentence. 
Lastly, we propose a Rationale-Aware Learning framework built up on a generative model that simultaneously learns sentiment classification, semantic rationale generation, and impression rationale generation from diverse textual inputs, such as sentences, aesthetic captions, and facial descriptions.", + "bbox": [ + 501, + 191, + 924, + 529 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3 Translation Module", + "text_level": 1, + "bbox": [ + 504, + 544, + 689, + 558 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This module translates visual content into overall aesthetic captions, object-level facial descriptions, or object-level aesthetic captions in textual form, embedding rich sentimental cues to facilitate object-level sentiment alignment. Specifically, we leverage object annotations from the Fine-Grained Multimodal Named Entity Recognition (MNER) task [25], which annotates specific targets in the sentence and their corresponding objects in the image. The MNER dataset is derived from the same Twitter dataset as the MASC datasets, incorporating the original image-text pairs from MASC. We further pre-process the MNER dataset and transfer its object annotations to the MASC dataset. To generate aesthetic captions rich in sentimental cues, we fine-tune a BLIP model using the recent aesthetic-specific dataset, Impression [54]. For facial description, we deploy the LLM-based EmoLA [73] to interpret fine-grained human mental states from images.", + "bbox": [ + 501, + 563, + 924, + 811 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To tackle the challenge of potential one-to-many annotation scenarios, wherein multiple visual objects correspond to a specific target in the sentence, we calculate the similarity between the entire image and all object annotations, retaining only the object with the highest similarity score for each specific target. 
Subsequently, we generate various textual auxiliary sentences, based on object annotations. Firstly, in cases where the object corresponding to a specific target is absent from the image, a fine-tuned BLIP model is applied to", + "bbox": [ + 501, + 811, + 924, + 944 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025", + "bbox": [ + 73, + 31, + 517, + 44 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "generate an overall aesthetic caption $A^{c} = \\left(a_{1}^{c}, a_{2}^{c}, \\ldots, a_{N_{c}}^{c}\\right)$ for the entire image:", + "bbox": [ + 71, + 51, + 488, + 83 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nA ^ {c} = B L I P _ {\\text {f i n e}} (V), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 214, + 89, + 488, + 107 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $BLIP_{fine}(\\cdot)$ is the fine-tuned BLIP over Impression dataset. If the object corresponding to a specific target is present in the image, we develop a Human-Object Differentiation (HOD) module based on the Sample and Computation Redistribution for Efficient Face Detection (SCRFD) [74] framework. 
This module determines the presence of a face within the annotated object-level visual content and assigns a facial binary label:", + "bbox": [ + 71, + 112, + 491, + 231 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nY _ {i} ^ {o _ {j}} = H O D \\left(V _ {i} ^ {o _ {j}}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 210, + 236, + 488, + 255 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $Y_{i}^{o_{j}} \\in [1,0]$ indicates whether the object-level visual content contains a face (0 for no face, 1 for face detected), and $V_{i}^{o_{j}}$ denotes the $j$ -th object-level visual content in the $i$ -th image. Subsequently, we generate facial descriptions or aesthetic captions for object-level visual content based on the facial binary label:", + "bbox": [ + 71, + 260, + 491, + 349 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nA ^ {o} = \\left\\{ \\begin{array}{l l} E m o L A \\left(V _ {i} ^ {o _ {j}}\\right), & \\text {i f} Y _ {i} ^ {o _ {j}} = 1, \\\\ B L I P _ {\\text {f i n e}} \\left(V _ {i} ^ {o _ {j}}\\right), & \\text {o t h e r w i s e}, \\end{array} \\right. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 151, + 354, + 488, + 393 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $A^o = (a_1^o, a_2^o, \\ldots, a_{N_o}^o)$ is the generated auxiliary sentence (facial description or aesthetic caption) for the object-level visual content.", + "bbox": [ + 71, + 400, + 491, + 445 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4 Rationale Dataset Construction", + "text_level": 1, + "bbox": [ + 73, + 472, + 346, + 486 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The current MASC benchmark includes only specific target (aspect) labels within the image-text pair sentences and their corresponding sentiment polarities. Recently, Fan et al. 
[24] introduced a dataset for MASC with cause analysis, focusing exclusively on textual semantics rather than integrating both visual and textual cues. Moreover, they overlook the affective resonance evoked by image aesthetic attributes, eliminating a crucial layer of emotional cues and resulting in an incomplete sentiment representation. This omission hinders the holistic integration of textual and visual modalities, leading to suboptimal sentiment modeling. Therefore, we employ GPT-4o to generate semantic and impression rationales, with the detailed generation process outlined in Algorithm 1.", + "bbox": [ + 71, + 494, + 491, + 700 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1 Rationale Dataset Construction" + ], + "code_body": "Input: All samples $(V, S, T, Y)$ in MASC dataset $M$ \nOutput: Rationale dataset $R$ which contains Semantic Rationale (SR) and Impression Rationale (IR) \n1: Design & refine prompt pool for SR (SRP) and IR (IRP) \n2: for each sample $(V_i, S_i, T_i, Y_i)$ in $M$ do \n3: //Randomly select a prompt from SRP for SR \n4: $SR_{prompt} \\gets PromptPoolforSR(V_i, S_i, T_i, Y_i)$ \n5: //Randomly select a prompt from IRP for IR \n6: $IR_{prompt} \\gets PromptPoolforIR(V_i, S_i, T_i, Y_i)$ \n7: Produce SR and IR via GPT-4o \n8: $SR_i \\gets GPT-4o(V_i, S_i, T_i, Y_i, SR_{prompt})$ \n9: $IR_i \\gets GPT-4o(V_i, S_i, T_i, Y_i, IR_{prompt})$ \n10: Add $(V_i, S_i, T_i, Y_i, SR_i, IR_i)$ to $R$ \n11: end for", + "bbox": [ + 73, + 732, + 491, + 939 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/f4d1c1a9ed4288e54021cae5187de76218c0eecc813a772b6d3020f33e5570c6.jpg", + "table_caption": [ + "TABLE1 Example prompts for semantic rationale generation." + ], + "table_footnote": [], + "table_body": "
TypePrompts
System PromptYou are an AI assistant specializing in multimodal understanding and sentiment analysis, particularly in scenarios involving the integration of image and text modalities.
Semantic Rationale Generation PromptYou will be provided with an image-text pair. Your task is to analyze the sentiment towards the specified entity {aspect} and explain why the sentiment polarity {label} is appropriate.\nYour explanation should consider both the semantic meaning of the text and the visual representation of the image, focusing on explicit content and the emotional or contextual cues conveyed by their combination.\nStart your response with: "Based on the image-text pair, the sentiment towards {aspect} is {label} because...". Provide a concise, focused explanation highlighting the single most compelling reason for this sentiment classification.
", + "bbox": [ + 517, + 90, + 911, + 395 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To comprehensively capture the emotional rationale underlying the identified sentiment polarity from a semantic perspective of both image and text, we employ GPT-4o (gpt-4o-2024-05-13) via the OpenAI $\\mathrm{API}^1$ to generate SR. Meanwhile, to enable the model to effectively capture implicit emotional cues arising from the affective resonance of aesthetic attributes, we employ GPT-4o to generate the IR.", + "bbox": [ + 503, + 416, + 923, + 518 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To enhance the diversity of generated semantic and impression rationales (SR and IR), we designed and refined a series of templates to construct separate prompt pools for SR and IR, from which a prompt is randomly selected as instructions to guide GPT-4o in generating the corresponding rationale. In this study, we adopt the approach outlined by Sarah et al. [75] and Wang et al. [70], leveraging tailored prompts conditioned on the dataset's gold-standard annotations to generate SR and IR using GPT-4o. The example prompts for generating SR and IR are presented in Tables 1 and 2, respectively.", + "bbox": [ + 503, + 518, + 921, + 680 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.5 Linguistic-aware Semantic Alignment(LSA)", + "text_level": 1, + "bbox": [ + 503, + 695, + 864, + 710 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We first introduce dynamic patch selection in Sec. 3.5.1. Then, we introduce the semantic patch calibration in Sec. 3.5.2. and patch-token alignment in Sec. 3.5.3. 
The overall process of LSA is shown in the pseudocode of Algorithm 2.
TypePrompts
System PromptYou are an AI assistant specializing in multimodal emotion and aesthetic understanding, especially in analyzing the emotional responses elicited by visual content.
Impression Rationale Generation PromptYou will be given an image-text pair. Your task is to analyze the specified entity {aspect} and its associated sentiment label {label} based entirely on the image's aesthetic attributes and the emotional resonance it conveys.Focus exclusively on the overall impression and visual connotations conveyed by the image, emphasizing why the assigned sentiment {label} aligns with the general mood or perception evoked by the entity. Avoid mentioning specific details; instead, high-light the prevailing emotional or aesthetic impression.
", + "bbox": [ + 86, + 90, + 480, + 366 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "patch features $V = (v_{cls}, v_1, v_2, \\ldots, v_{N_v}) \\in \\mathbb{R}^{(N_v + 1) \\times d}$ . For sentence $S$ , a pre-trained Transformer serves as the textual encoder. The sentence is tokenized into $N_s$ tokens and processed by the encoder to extract linguistic features $S = (s_1, s_2, \\ldots, s_{N_s}) \\in \\mathbb{R}^{N_s \\times d}$ . Subsequently, we incorporate spatial information from images into visual patch features and use an MLP-based score-sensitive prediction mechanism to learn significant scores:", + "bbox": [ + 71, + 407, + 490, + 527 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\np _ {i} ^ {s} = \\operatorname {S i g m o i d} \\left(\\mathbf {M L P} \\left(\\boldsymbol {v} _ {i}\\right)\\right), i \\in \\{1, 2, \\dots , N _ {v} \\}, \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 120, + 534, + 488, + 551 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $p_i^s \\in [0,1]$ represents the importance score assigned to each visual patch. Moreover, achieving refined cross-modal alignment requires more than depending solely on a scoring mechanism to identify valuable visual patches without linguistic supervision [76], [77]. Consequently, we introduce linguistic context by calculating attentive scores between visual patches and the input sentence. First, we derive linguistic-aware scores $p_i^l$ through cross-attention between visual patches and linguistic features. 
Then, we enhance key visual content by computing self-attention within patches, producing image-prominent scores $p_i^e$ :", + "bbox": [ + 71, + 556, + 488, + 719 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\np _ {i} ^ {l} = \\operatorname {N o r m} \\left(\\boldsymbol {v} _ {i} \\cdot S / d\\right), p _ {i} ^ {e} = \\operatorname {N o r m} \\left(\\boldsymbol {v} _ {i} \\cdot V / d\\right), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 119, + 724, + 488, + 744 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\text{Norm}(\\cdot)$ denotes the normalization of scores to a range from 0 to 1. $S$ and $V$ represent the global embeddings for linguistic features and visual patches, respectively. These scores are integrated to derive the final value score:", + "bbox": [ + 71, + 750, + 490, + 808 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\np _ {i} ^ {f} = (1 - \\beta) p _ {i} ^ {s} + \\frac {\\beta}{2} \\left(p _ {i} ^ {l} + p _ {i} ^ {e}\\right), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 169, + 814, + 488, + 843 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\beta$ refers to the weight parameter. After obtaining the value score $p^f = (p_1^f, p_2^f, p_3^f, \\ldots, p_{N_v}^f) \\in \\mathbb{R}^{N_v}$ , we convert it into a binary decision matrix $\\{0, 1\\}^{N_v}$ to determine patch selection. This matrix is constructed using the Gumbel-Softmax technique [78], ensuring a smooth and differentiable sampling process. 
The Gumbel-Softmax matrix", + "bbox": [ + 71, + 848, + 491, + 944 + ], + "page_idx": 5 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 2 Linguistic-aware Semantic Alignment (LSA)" + ], + "code_body": "1: procedure DYNAMIC PATCH SELECTION(V, S) \n2: Extract visual patches $V \\leftarrow \\mathrm{ViT}(V)$ , text tokens $S \\leftarrow$ TextEnc(S) \n3: Compute significance scores: $p_i^s \\leftarrow \\mathrm{MLP}(v_i)$ , $p_i^l \\leftarrow \\mathrm{Norm}(v_i S^\\top)$ , $p_i^e \\leftarrow \\mathrm{Norm}(v_i V^\\top)$ \n4: Fuse scores: $p_i^f \\leftarrow (1 - \\beta)p_i^s + \\frac{\\beta}{2}(p_i^l + p_i^e)$ \n5: Apply Gumbel-Softmax sampling to obtain binary mask $D \\in \\{0, 1\\}^{N_v}$ \n6: Return selected patches $V^p \\leftarrow \\{v_i | D_i = 1\\}$ \n7: end procedure \n8: procedure SEMANTIC PATCH CALIBRATION( $V^p$ ) \n9: Aggregate key patches: $\\tilde{V}^p \\leftarrow \\mathrm{Softmax}(\\mathrm{MLP}(V^p)) \\cdot V^p \\quad \\triangleright$ Adaptive weighting \n10: Fuse redundant patches: $\\tilde{v}^r \\leftarrow \\sum \\tilde{p}_i v_i \\quad \\triangleright$ Weighted sum via $p^f$ \n11: Return $\\tilde{V}^p \\leftarrow [v_{cls}; \\tilde{V}^p; \\tilde{v}^r]$ \n12: end procedure \n13: procedure PATCH-TOKEN ALIGNMENT( $\\tilde{V}^p, S$ ) \n14: Compute cosine similarity matrix $A \\in \\mathbb{R}^{(N_f + 2) \\times N_s}$ \n15: Calculate alignment score $K(V, S) \\leftarrow \\frac{1}{2} (\\text{mean}(\\text{max}_j A_{ij}) + \\text{mean}(\\text{max}_i A_{ij}))$ \n16: Optimize with $\\mathcal{L}_{\\text{align}} \\leftarrow \\text{Bi-directional Triplet Loss}(K(V, S), K(V, \\hat{S}), K(\\hat{V}, S))$ \n17: end procedure", + "bbox": [ + 509, + 68, + 921, + 424 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "is defined as:", + "bbox": [ + 504, + 450, + 601, + 464 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {M} _ {i, l} = \\frac {\\exp \\left(\\log \\left(\\boldsymbol {m} _ {i , l} + G 
_ {i , l}\\right) / \\tau\\right)}{\\sum_ {j = 1} ^ {L} \\exp \\left(\\log \\left(\\boldsymbol {m} _ {i , j} + G _ {i , j}\\right) / \\tau\\right)}, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 568, + 469, + 921, + 506 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $M \\in \\mathbb{R}^{N_v \\times L}$ , $L$ indicates the total number of categories. In this scenario, $L$ is set to 2 for the binary decision $(\\pmb{m}_{i,1} = p_i^f, \\pmb{m}_{i,2} = 1 - p_i^f)$ . $G_i = -\\log (-\\log (U_i))$ represents the Gumbel distribution, $U_i$ refers to the uniform distribution and $\\tau$ is the temperature parameter.", + "bbox": [ + 503, + 511, + 921, + 585 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Next, we obtain the differentiable decision matrix $D$ by applying the arg-max on $M$ :", + "bbox": [ + 503, + 585, + 921, + 617 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {D} = \\operatorname {S a m p l i n g} (\\boldsymbol {M}) _ {*}, 1 \\in \\{0, 1 \\} ^ {N _ {v}}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 591, + 622, + 921, + 640 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $D$ indicates patch selection outcomes: \"1\" for important patches and \"0\" for redundant ones. In the training stage, gradients are backpropagated through the differentiable decision matrix, enabling the dynamic selection of valuable visual patches via the score-sensitive prediction mechanism.", + "bbox": [ + 501, + 646, + 921, + 734 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.5.2 Semantic Patch Calibration(SPC)", + "text_level": 1, + "bbox": [ + 504, + 747, + 790, + 763 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "This section aims to further refine the semantic representation of the selected valuable visual patches. 
After dynamically selecting important visual patches guided by linguistic supervision, we designate them as $V^{p} = \\left(v_{1}^{p}, v_{2}^{p}, \\ldots, v_{N_{p}}^{p}\\right) \\in \\mathbb{R}^{N_{p} \\times d}$ . $N_{p}$ is the number of selected valuable visual patches. We employ an aggregation network [79] to model multiple aggregation weights and combine the selected $N_{p}$ visual patches to generate $N_{f}$ informative visual features:", + "bbox": [ + 501, + 765, + 921, + 901 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\boldsymbol {v}} _ {j} ^ {p} = \\sum_ {i = 1} ^ {N _ {p}} (\\boldsymbol {W}) _ {i j} \\cdot \\boldsymbol {v} _ {i} ^ {p}, \\quad j = [ 1, \\dots , N _ {f} ], \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 575, + 905, + 921, + 946 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025", + "bbox": [ + 73, + 32, + 517, + 44 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {W} = \\operatorname {s o f t m a x} \\left(\\mathbf {M L P} \\left(\\boldsymbol {V} ^ {p}\\right)\\right), \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 181, + 51, + 488, + 70 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $(\\mathbf{W})$ denotes the normalized weight matrix and $\\sum_{i=1}^{N_s} (\\mathbf{W})_{ij} = 1$ . $N_f$ is the number of aggregated patches $(N_f < N_p)$ . The aggregation network adaptively combines visually similar patches and is differentiable for end-to-end training. While redundant visual patches can be discarded, they may contain supplementary semantic features for refined cross-modal alignment. 
Therefore, we fuse them into a single patch:", + "bbox": [ + 71, + 75, + 490, + 193 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\boldsymbol {v}} ^ {r} = \\sum_ {i \\in \\mathcal {N}} \\tilde {p} _ {i} \\cdot \\boldsymbol {v} _ {i}, \\quad \\tilde {p} _ {i} = \\frac {\\exp \\left(p _ {i} ^ {f}\\right) \\boldsymbol {D} _ {i}}{\\sum_ {i = 1} ^ {N} \\exp \\left(p _ {i} ^ {f}\\right) \\boldsymbol {D} _ {i}}, \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 127, + 200, + 490, + 250 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\mathcal{N}$ represents the set for redundant visual patches. $\\tilde{p}_i$ denotes the normalized score of the value score $p_i^f$ . Finally, this component models the calibrated refined visual patches, denoted as $\\tilde{V}^p = (v_{cls},\\tilde{v}_1^p,\\tilde{v}_2^p,\\dots ,\\tilde{v}_{N_f}^p,\\tilde{v}^r)\\in \\mathbb{R}^{(N_f + 2)\\times d}$ .", + "bbox": [ + 71, + 253, + 491, + 321 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.5.3 Patch-token Alignment(PTA)", + "text_level": 1, + "bbox": [ + 73, + 330, + 323, + 347 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "This module aims to achieve the fine-grained patch-token level alignment. Specifically, we first utilize the refined visual patches $\\tilde{V}^p$ and linguistic features $S$ to compute tokenwise similarities, producing a patch-token similarity matrix $A\\in \\mathbb{R}^{(N_f + 2)\\times N_s}$ . $(A)_{ij} = \\frac{(\\tilde{v}_i)^T s_j}{\\|\\tilde{v}_i\\| \\|s_j\\|}$ denotes the patch-token level alignment score between the $i$ -th visual patch and the $j$ -th word. Subsequently, maximum-correspondence interaction is introduced to aggregate cross-modal alignment. 
For each visual patch (or token), we identify the most aligned textual token (or patch) and calculate the average alignment score $K(V,S)$ , representing the overall alignment between the image $V$ and the sentence $S$ :", + "bbox": [ + 71, + 349, + 490, + 529 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nK (V, S) = \\frac {1}{N _ {f} + 2} \\sum_ {i = 1} ^ {N _ {f} + 2} \\max _ {j} (\\boldsymbol {A}) _ {i j} + \\frac {1}{N _ {s}} \\sum_ {j = 1} ^ {N _ {s}} \\max _ {i} (\\boldsymbol {A}) _ {i j} \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 89, + 536, + 488, + 589 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Following a previous method [80], the bi-direction triplet loss with hard negative mining is exploited:", + "bbox": [ + 71, + 590, + 488, + 619 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {\\text {a l i g n}} = \\sum_ {(V, S)} [ \\gamma - K (V, S) + K (V, \\hat {S}) ] _ {+} \\tag {13} \\\\ + [ \\gamma - K (V, S) + K (\\hat {V}, S) ] _ {+}, \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 142, + 626, + 488, + 679 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\gamma$ is the trade-off parameter. $[x]_{+} = \\max (x,0)$ and $(V,S)$ refers to a positive image-text pair in the mini-batch. 
Moreover, $\\hat{S} = \\operatorname{argmax}_{j\\neq S}K(V,j)$ and $\\hat{V} = \\operatorname{argmax}_{i\\neq V}K(i,V)$ indicate the hardest negative sentence and visual examples within a mini-batch, respectively.", + "bbox": [ + 71, + 685, + 490, + 760 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.6 Rationale-aware Learning", + "text_level": 1, + "bbox": [ + 71, + 777, + 305, + 792 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To endow the model with the ability to perform semantic causality and impression reasoning, we propose a rationale-aware learning framework designed to fine-tune a sequence-to-sequence (seq2seq) model. This seq2seq model is proposed to achieve three task objectives for each specific target within the image-text pair: sentiment classification (SC), semantic rationale generation (SRG), and impression rationale generation (IRG). These tasks are differentiated by the use of distinct input configurations and input content. For SC, the decoder outputs only the predicted sentiment", + "bbox": [ + 71, + 796, + 490, + 941 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "polarity. In SRG and IRG, the decoder produces the corresponding rationale and the sentiment prediction.", + "bbox": [ + 503, + 53, + 919, + 82 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Specifically, our input comprises the textual sentence $S = (s_{1}, s_{2}, \\ldots, s_{N_{s}})$ , the overall aesthetic caption of the image $A^{c} = (a_{1}^{c}, a_{2}^{c}, \\ldots, a_{N_{c}}^{c})$ , the object-level description $A^{o} = (a_{1}^{o}, a_{2}^{o}, \\ldots, a_{N_{o}}^{o})$ , which pertains to either facial or aesthetic attributes and the specific target $T$ . The input format is determined by the presence of the specific target within the visual content. For example, if the specific target is identified in the image, based on the annotations provided by Wang et al. 
[25], the input for SC, SRG, and IRG is defined as follows:", + "bbox": [ + 503, + 82, + 921, + 227 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nH ^ {\\mathrm {s c}} = \\operatorname {e n c o d e r} \\left(t _ {\\langle \\mathrm {s c} \\rangle}, A ^ {c}, S, T\\right), \\tag {14}\n$$\n", + "text_format": "latex", + "bbox": [ + 604, + 234, + 921, + 252 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nH ^ {\\mathrm {s r g}} = \\operatorname {e n c o d e r} \\left(t _ {\\langle \\mathrm {s r g} \\rangle}, A ^ {c}, S, T\\right), \\tag {15}\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 258, + 919, + 275 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nH ^ {\\text {i r g}} = \\operatorname {e n c o d e r} \\left(t _ {\\langle \\mathrm {i r g} \\rangle}, A ^ {c}, S, T\\right), \\tag {16}\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 282, + 919, + 299 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where encoder $(\\cdot)$ is the Transformer encoder of the seq2seq model. The tokens $t_{\\langle \\mathrm{sc}\\rangle}, t_{\\langle \\mathrm{src}\\rangle},$ and $t_{\\langle \\mathrm{irg}\\rangle}$ are specialized tokens designed to represent distinct tasks. Although the specific aspects are not present in the image, this does not imply that sentimental cues from the image have no impact on predicting the sentiment polarity. On the contrary, incorporating sentiment cues from the holistic image can provide valuable insights into the influence of image aesthetic attributes on the sentiment prediction for the specific aspect. 
For samples where specific targets are present in the visual content, the input format is structured as follows:", + "bbox": [ + 501, + 306, + 921, + 467 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nH ^ {\\mathrm {s c}} = \\operatorname {e n c o d e r} \\left(t _ {\\langle \\mathrm {s c} \\rangle}, S, A ^ {o}, T\\right), \\tag {17}\n$$\n", + "text_format": "latex", + "bbox": [ + 604, + 474, + 919, + 491 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nH ^ {\\mathrm {s r g}} = \\operatorname {e n c o d e r} \\left(t _ {\\left(\\mathrm {s r g}\\right)}, S, A ^ {o}, T\\right), \\tag {18}\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 497, + 919, + 513 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nH ^ {\\text {i r g}} = \\operatorname {e n c o d e r} \\left(t _ {\\langle \\text {i r g} \\rangle}, S, A ^ {o}, T\\right). \\tag {19}\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 521, + 919, + 537 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We employ fine-grained, object-level emotion-laden descriptions to establish alignment between specific targets and their corresponding objects in the image, which enhances both the accuracy and interpretability of the sentiment prediction process. Subsequently, these hidden features are passed through a stack of self-attention-based encoders, which dynamically fuse representations and model both intra-modal and cross-modal interactions. Finally, the decoder produces task-specific outputs. 
For Sentiment Classification (SC), the decoder generates the predicted sentiment polarity, selecting from \"positive,\" \"negative,\" or \"neutral,\" denoted as $\\hat{y}_{sc}$ :", + "bbox": [ + 503, + 544, + 921, + 719 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nG ^ {\\mathrm {s c}} = \\left[ \\langle \\mathrm {s e n} \\rangle \\hat {y} ^ {\\mathrm {s c}} \\langle / \\mathrm {s e n} \\rangle \\right], \\tag {20}\n$$\n", + "text_format": "latex", + "bbox": [ + 627, + 726, + 919, + 744 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where the special tokens $\\langle \\mathrm{sen}\\rangle$ and $\\langle / \\mathrm{sen}\\rangle$ are denoted as the start and end markers for SC predictors. For the two additional rationale generation tasks SRG and IRG, the decoder generates not only the semantic rationale $\\hat{s}r$ and impression rationale $\\hat{i}r$ for the specific target but also their corresponding sentiment predictions $\\hat{y}_{sr}$ and $\\hat{y}_{si}$ :", + "bbox": [ + 501, + 750, + 921, + 839 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nG ^ {\\mathrm {s r}} = \\left[ \\langle \\mathrm {s r} \\rangle \\hat {s} r \\langle / \\mathrm {s r} \\rangle \\langle \\mathrm {s e n} \\rangle \\hat {y} ^ {\\mathrm {s r}} \\langle / \\mathrm {s e n} \\rangle \\right], \\tag {21}\n$$\n", + "text_format": "latex", + "bbox": [ + 591, + 845, + 919, + 864 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nG ^ {\\mathrm {i r}} = \\left[ \\langle \\mathrm {i r} \\rangle \\hat {i r} \\langle / \\mathrm {i r} \\rangle \\langle \\mathrm {s e n} \\rangle \\hat {y} ^ {\\mathrm {i r}} \\langle / \\mathrm {s e n} \\rangle \\right], \\tag {22}\n$$\n", + "text_format": "latex", + "bbox": [ + 593, + 867, + 919, + 892 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\langle \\mathrm{sr}\\rangle$ , $\\langle / \\mathrm{sr}\\rangle$ , $\\langle \\mathrm{ir}\\rangle$ , $\\langle / 
\\mathrm{ir}\\rangle$ , $\\langle \\mathrm{sen}\\rangle$ , and $\\langle / \\mathrm{sen}\\rangle$ serve as specialized markers to delineate the rationale and sentiment polarity. Finally, the input sequence is uniformly denoted", + "bbox": [ + 501, + 897, + 921, + 941 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025", + "bbox": [ + 73, + 32, + 517, + 44 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "as $X$ , and the generated textual content is represented as $Z = \\{z_{1}, z_{2}, \\ldots, z_{N_{z}}\\}$ . Consequently, the loss function for the generation process is formulated as follows:", + "bbox": [ + 73, + 53, + 491, + 98 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {Z} = - \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\sum_ {n _ {z} = 1} ^ {N _ {z}} \\log P \\left(z _ {i, n _ {z}} \\mid \\hat {z} _ {i, < n _ {z}}, X\\right), \\tag {23}\n$$\n", + "text_format": "latex", + "bbox": [ + 127, + 104, + 488, + 143 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $z_{i,n_z}$ is the ground truth token at position $n_z$ for sample $i$ , $\\hat{z}_{i, < n_z}$ represents the generated sequence up to position $n_z - 1$ for sample $i$ , and $P(z_{i,n_z} \\mid \\hat{z}_{i, < n_z}, X)$ denotes the probability of generating token $z_{i,n_z}$ conditioned on $\\hat{z}_{i, < n_z}$ and $X$ . In this rationale-aware learning framework, since all objectives are formulated as generative tasks, the loss functions $\\mathcal{L}_{SC}$ , $\\mathcal{L}_{SRG}$ , and $\\mathcal{L}_{IRG}$ are all employ the generative loss function, E.q. 23. 
Therefore, the objective function in the proposed method is formulated as follows:", + "bbox": [ + 71, + 150, + 491, + 282 + ], + "page_idx": 7 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} = \\alpha \\mathcal {L} _ {\\mathrm {S C}} + \\frac {1 - \\alpha}{2} \\mathcal {L} _ {\\mathrm {S R G}} + \\frac {1 - \\alpha}{2} \\mathcal {L} _ {\\mathrm {I R G}} + \\lambda \\mathcal {L} _ {\\text {a l i g n}}, \\tag {24}\n$$\n", + "text_format": "latex", + "bbox": [ + 91, + 287, + 488, + 316 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "where $\\alpha, \\lambda \\in (0,1)$ are tradeoff hyperparameters that regulate the relative contributions of each generative loss and the patch-token alignment.", + "bbox": [ + 71, + 321, + 491, + 367 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4 EXPERIMENTS", + "text_level": 1, + "bbox": [ + 73, + 388, + 246, + 404 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In this section, we provide a comprehensive description of the experimental settings and evaluate the proposed method on three publicly available MASC datasets, benchmarking it against state-of-the-art methods. Furthermore, we perform an extensive series of studies to thoroughly analyze the effectiveness of the proposed approach.", + "bbox": [ + 71, + 410, + 491, + 500 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.1 Experimental Settings", + "text_level": 1, + "bbox": [ + 73, + 520, + 279, + 536 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.1.1 Datasets", + "text_level": 1, + "bbox": [ + 73, + 539, + 191, + 554 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We utilize three widely recognized benchmark datasets for MASC [11], [81]: Twitter-2015, Twitter-2017, and the Political Twitter dataset. Each sample within these datasets comprises a user-generated multimodal image-text pair, including an image, a textual sentence, and one or more specific targets. 
Each aspect is annotated with a sentiment label from the set {Positive, Negative, Neutral}.
The model is implemented using PyTorch, and experiments are conducted on an NVIDIA V100 GPU with 30 GB of memory.", + "bbox": [ + 503, + 53, + 923, + 112 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.2 Compared Baselines", + "text_level": 1, + "bbox": [ + 504, + 128, + 702, + 143 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We conducted a comprehensive comparative evaluation of the proposed method against a range of robust baseline approaches, which are classified into three categories. The first category consists of image-only methods:", + "bbox": [ + 503, + 148, + 923, + 208 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "- Res-Target [84] leverages ResNet as its backbone to extract visual features exclusively for predicting the sentiment of the specified target.", + "bbox": [ + 527, + 214, + 921, + 258 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The second category includes text-only approaches:", + "bbox": [ + 504, + 265, + 866, + 281 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- MemNet [85] employs a stacked architecture of multiple memory networks to build deep memory networks.", + "- MGAN [86] is based on a multi-grained attention architecture designed to adaptively capture both coarse-grained and fine-grained interactions.", + "- BERT [87] is a powerful pre-trained language model trained using a masked language modeling objective and next sentence prediction." 
+ ], + "bbox": [ + 527, + 286, + 921, + 417 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Finally, this study incorporates the following advanced image-text multimodal approaches:", + "bbox": [ + 504, + 425, + 921, + 455 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- MIMN [88] comprises two customized interactive memory networks designed to capture both inter-modal dynamics between different modalities and intra-modal dynamics within each individual modality.", + "- ESAFN [12] is a target-sensitive interaction and fusion network designed to adaptively capture interactive features across modalities while also modeling intra-modality features.", + "- TomBERT [11] utilizes BERT and ResNet as backbone models for encoding textual and visual content, respectively. Cross-modal fusion is accomplished by integrating these features into a BERT encoder.", + "- JML-MASC [44] jointly extracts the specific targets and identifies their sentiment polarity by utilizing a visual de-nosing mechanism and attention-based fusion framework.", + "- EF-CapTrBERT [17] converts visual content into an auxiliary sentence, which is then combined with the input sentence and processed through a BERT encoder for sentiment prediction.", + "- VLP-MABSA [14] is a task-specific pre-trained generative framework for multimodal aspect-based sentiment analysis, built on the BART architecture.", + "- FITE [23] is a translation-based approach, which captures facial features in the image and translates them into a corresponding facial description as an auxiliary sentence for sentiment classification.", + "- CMMT-MASC [15] is a cross-modal multi-task Transformer designed for MASC. Additionally, it employs multimodal gating mechanisms to dynamically regulate the flow of textual and visual information during interactions." 
+ ], + "bbox": [ + 527, + 460, + 921, + 943 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025", + "bbox": [ + 73, + 31, + 517, + 44 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/703550cb2e9d3a9aa1f9ae05fed89527e34c05dc0ccac49b3b4ddcc077b7558f.jpg", + "table_caption": [ + "TABLE 3 Detailed Statistics of Twitter-2015, Twitter-2017, and Political Twitter datasets. The \"#sentence\" refers to the total number of sentences. \"#Avg. Length\" denotes the average length of sentences, while \"#Avg. Aspect\" indicates the average number of aspects in a sentence. \"#Avg. Length of SR\", \"#Avg. Length of IR\", \"#Avg. Length of AC\", \"#Avg. Length of FD\", and \"#Avg. Length of AO\" correspond to the average lengths of semantic rationales (SR), impression rationales (IR), aesthetic captions for the entire image, facial descriptions, and aesthetic captions for objects." + ], + "table_footnote": [], + "table_body": "
LabelTwitter-2015Twitter-2017Political Twitter
TrainDevTestTrainDevTestTrainDevTest
Positive92830331715085154933318570176
Neutral188367060716385175734697823368
Negative368149113416144168887166305
Total31791122103735621176123489021559849
#Sentence210172767417465775875105900407
#Avg. Length16.7216.7417.0516.2116.3716.3816.6216.6716.59
#Avg. Aspect1.511.541.542.042.042.101.741.732.09
#Avg. Length of SR42.542.442.542.642.843.042.742.642.2
#Avg. Length of IR56.756.055.755.556.155.455.956.156.3
#Avg. Length of AC35.935.935.532.532.531.634.034.233.3
#Avg. Length of FD39.238.537.838.938.539.339.038.438.7
#Avg. Length of AO29.129.730.328.929.428.929.129.131.3
", + "bbox": [ + 155, + 126, + 838, + 377 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- HIMT [89] is a Transformer framework that incorporates a hierarchical interaction component to model the relationships between specific aspects and the input sentence, as well as the interactions between specific aspects and object-level visual content.", + "- IMT [13] is a coarse-to-fine-grained multimodal matching network that predicts image-target relevance and performs object-target alignment to support sentiment polarity identification.", + "- CoolNet [19] is a fine-grained cross-modal alignment approach that aligns textual and visual content from both semantic and syntactic perspectives.", + "- UnifiedTMSC [90] introduces a descriptive prompt paraphrasing paradigm to generate paraphrased prompts, while optimizing image vectors within the multimodal representation space of vision and language.", + "- VEMP [91] decodes the semantic information of visual elements by utilizing textual tokens in the image, target-aware adjective-noun pairs, and image captions.", + "- Atlantis-MASC [22] is a trident-shaped, aesthetic-driven approach for joint MABSA, which integrates image aesthetic attributes and achieves effective alignment of vision and text across multiple granular levels.", + "- MDCA [24] is a generative framework proposed to provide supplementary reasoning and explicit rationales to explain why specific content conveys certain sentiment." + ], + "bbox": [ + 96, + 401, + 491, + 838 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.3 Main Results", + "text_level": 1, + "bbox": [ + 73, + 864, + 210, + 878 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The main results are presented in Table 4. 
Given that the two additional rationale generation tasks contribute to improving sentiment prediction by providing explanations for the underlying causes of sentiment, we select the prediction", + "bbox": [ + 71, + 883, + 490, + 944 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "results from sentiment classification $\\hat{y}^{\\mathrm{sc}}$ as the primary outcomes for accuracy and F1 score evaluation. As presented in Table 4, the proposed method demonstrates competitive performance on both Twitter datasets compared to strong baselines from both text-only and multimodal approaches. Specifically, it achieves the highest accuracy (81.61%) and F1 score (77.98%) on the Twitter-2015 dataset, as well as the best accuracy (75.62%) and a near-optimal F1 score (74.59%) on the Twitter-2017 dataset.", + "bbox": [ + 501, + 401, + 921, + 532 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Compared to the image-only approach (Res-Target), the proposed method achieves a remarkable improvement of over $21.73\\%$ in accuracy on the Twitter-2015 dataset. Similarly, when compared to the best-performing text-only method (BERT), the proposed method demonstrates a substantial performance gain, with a $7.46\\%$ increase in accuracy and a $9.12\\%$ improvement in F1 on Twitter-2015. These observations underscore the limitations of single-modality approaches in capturing subtle sentiment cues from multimodal content. Moreover, the proposed method consistently outperforms recent multimodal models, such as UnifiedTMSC, Atlantis-MASC, and MDCA. For instance, UnifiedTMSC adopts a paraphrasing-based approach to enrich textual features but lacks explicit modeling of visual aesthetic-driven affective impact. On Twitter-2017, the proposed method achieves comparable F1 performance (74.59 vs. 74.70) while delivering higher accuracy (75.62 vs. 75.40), which highlights the complementary benefits of aesthetic affective resonance modeling. 
While Atlantis-MASC incorporates image aesthetics, it primarily relies on global alignment techniques, which may overlook the intricate relationships between aspects and objects. The proposed method surpasses Atlantis-MASC by $1.58\\%$ in accuracy on Twitter-2017, underscoring the efficacy of its patch-token level and object-level alignment in capturing aspect-specific visual details. While MDCA incorporates reasoning and direct causality to explain sentiment causes, it primarily emphasizes textual semantic reasoning, which restricts its", + "bbox": [ + 501, + 534, + 921, + 944 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025", + "bbox": [ + 73, + 32, + 517, + 44 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 911, + 32, + 921, + 42 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/9a52e7f0d93ba6fe3d0396f07a292739097a1fad75755af749ec8dd06624ef3c.jpg", + "table_caption": [ + "TABLE 4 The main results $(\\%)$ are presented with the best-performing results highlighted in bold and the second-best values indicated with underlined text." + ], + "table_footnote": [], + "table_body": "
ModalityModelVenueTwitter-2015Twitter-2017Political Twitter
AccF1AccF1AccF1
Image OnlyRes-TargetCVPR 201659.8846.4858.5953.9860.2158.42
Text OnlyMemNetEMNLP 201670.1161.7664.1860.90--
MGANEMNLP 201871.1764.2164.7561.4667.3762.78
BERTNAACL 201974.1568.8668.1565.2369.4164.25
Image and TextMIMNAAAI 201971.8465.6965.8862.9970.5265.39
ESAFNTASLP 201973.3867.3767.8364.2269.2264.66
TomBERTIJCAI 201977.1571.1570.3468.0369.6562.35
JML-MASCEMNLP 202178.70-72.70-70.1468.37
EF-CapTrBERTACM MM 202178.0173.2569.7768.4269.0464.94
VLP-MABSAACL 202278.6073.8073.8071.8070.3269.64
CMMT-MASCIPM 202277.90-73.8---
FITEEMNLP 202278.4973.9070.9068.7068.6465.83
HIMTTAC 202278.1473.6871.1469.16--
IMTIJCAI 202278.2774.1972.6171.9769.9267.86
CoolNetIPM 202379.9275.2871.6469.5870.9170.25
UnifiedTMSCEMNLP 202379.8076.3075.4074.70--
VEMPEMNLP 202378.8875.0973.0172.42--
Atlantis-MASCINFFUS 202479.03-74.20-69.8368.97
MDCATNNLS 202480.7177.1573.9172.3771.3870.94
OursChimera-81.6177.9875.6274.5972.5672.32
", + "bbox": [ + 135, + 92, + 857, + 457 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ability to effectively capture detailed visual content and the corresponding aesthetic affective resonance. In contrast, the proposed method surpasses MDCA with a $0.90\\%$ improvement in accuracy and a $0.83\\%$ increase in F1 on the Twitter-2015 dataset. This performance gain highlights the advantages of comprehensively understanding sentiment causality from both visual-textual semantic and affective resonance perspectives.", + "bbox": [ + 71, + 479, + 491, + 599 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.4 Results on Political Twitter", + "text_level": 1, + "bbox": [ + 71, + 616, + 313, + 631 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "The Political Twitter dataset differs significantly from Twitter-2015 and Twitter-2017, especially due to its challenging domain shift between training, development, and test sets. Such domain differences create substantial barriers to generalization, which makes the task particularly suitable for advanced models that can comprehend subtle causality and context shifts.", + "bbox": [ + 71, + 635, + 490, + 738 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "From Table 4, it can be observed that the proposed Chimera demonstrates distinct advantages over existing approaches on the Political Twitter dataset. Compared to the third best performing method CoolNet, which achieved $71.32\\%$ accuracy and $69.64\\%$ F1 score, Chimera showcases a significant improvement. Similarly, MDCA, which performed with an accuracy of $71.38\\%$ and an F1 score of $70.94\\%$ , still lags behind Chimera. Additionally, we observed that the discrepancy between accuracy and F1-score significantly narrows as accuracy increases, particularly when accuracy surpasses $70\\%$ . 
We hypothesize that the underlying cause may lie in the relatively balanced class distribution of sentiment categories (e.g., positive, neutral,", + "bbox": [ + 71, + 752, + 491, + 944 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "negative) within the Political Twitter test set (as shown in Table 3). At higher accuracy levels, the ratios of false positives to false negatives exhibit increasing symmetry across models. This equilibrium consequently reduces the divergence between precision and recall metrics, thereby causing the F1-score - defined as their harmonic mean - to naturally converge with accuracy.", + "bbox": [ + 501, + 479, + 924, + 585 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.5 Ablation Study", + "text_level": 1, + "bbox": [ + 504, + 602, + 658, + 618 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "To systematically investigate the influence of the linguistic-aware semantic alignment module, including semantic and impression rationale reasoning as well as object-level fine-grained alignment, on sentiment prediction, we conducted a series of ablation studies and the results are shown in Table 5.", + "bbox": [ + 501, + 621, + 921, + 708 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "As presented in Table 5, the exclusion of semantic rationale (\"w/o SRG\") results in a noticeable performance decline across all three datasets. This effect is particularly pronounced on the Twitter-2017 and Political Twitter datasets, where nearly all evaluation metrics, including accuracy and F1 score, exhibit a reduction of approximately $2\\%$ . Similarly, the absence of impression rationale reasoning (\"w/o IRG\") results in performance fluctuations on the Twitter-2015 and Political Twitter datasets. 
However, the most noticeable effect is observed on the Twitter-2017 dataset, where the model's performance exhibits a significant degradation, particularly in the sentiment classification task, with nearly a $4\\%$ drop in both accuracy and F1 score. The results (\"w/o IRG & AC\") reveal consistent performance degradation in both Accuracy and F1-score across all three datasets. Particularly noteworthy", + "bbox": [ + 501, + 709, + 923, + 944 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025", + "bbox": [ + 73, + 31, + 517, + 42 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 906, + 32, + 923, + 42 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/b345948b6fc756b2505bb224ee746b6e0583cff242cc6fb931a837f9fd22f931.jpg", + "table_caption": [ + "TABLE 5 The results $(\\%)$ of the ablation study for our Chimera model are presented. The top-performing values emphasized in bold and the second-best values distinguished using underlined text. The notations \"w/o SRG,\" \"w/o IRG,\" and \"w/o SRG & IRG\" denote the exclusion of the respective generative tasks. \"w/o IRG & AC\" refers to the removal of IR generation task and replace the aesthetic caption (AC) with general caption. \"w/o LSA\" represents the removal of the Linguistic-aware Semantic Alignment branch, while \"w/o OD\" indicates the exclusion of object-level descriptions (e.g., facial descriptions and object-level aesthetic captions) from the input sequence." + ], + "table_footnote": [], + "table_body": "
MethodTwitter-2015Twitter-2017Political Twitter
AccF1AccF1AccF1AccF1AccF1AccF1AccF1AccF1AccF1
SCSRGIRGSCSRGIRGSCSRGIRG
Chimera81.6177.9881.1277.1177.5673.5575.6274.5975.0973.6471.9668.2372.5672.3271.6971.4069.3068.95
w/o SRG80.5276.10--75.8370.9673.5072.49--70.6667.2070.4369.88--68.2567.58
w/o IRG80.2375.2280.0375.42--71.8870.1672.670.73--71.1570.7071.0170.52--
w/o IRG & AC80.6776.0380.1176.46--71.5969.8372.2570.33--70.6270.0671.0470.47--
w/o SRG & IRG77.2471.82----71.2368.98----67.8867.20----
w/o LSA80.5477.0379.7576.2276.5272.0373.7270.9674.3872.2671.3667.8871.8671.3770.9270.5568.4367.99
w/o OD79.9676.0880.0976.3277.1272.8473.0670.8574.3772.3671.1167.5371.6471.1271.1270.7768.5568.07
w/o Aes-cap80.0375.2779.9476.0575.6971.0872.3671.6472.2871.2169.2865.4469.4368.9469.3769.0067.8567.27
", + "bbox": [ + 75, + 138, + 924, + 271 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/4da63db8ee69d0dc75759d822365f0103e1750ba58561bdf4257661e7d41d2c8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 73, + 282, + 356, + 412 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/08133c089ed25791aec10225277c395d31439bd5d26d91d73a8056da7ca18d6f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 367, + 282, + 638, + 412 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/ef430b2b0457b39e9f33224cf48720981aa5772b0bdcca9226f222196ae0fd43.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 651, + 282, + 923, + 412 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/d9c00fe46e11a776cce92beb84c5862921e6256a790e3398fc8e625562168895.jpg", + "image_caption": [ + "Fig. 2. Results $(\\%)$ on hyper-parameter of $\\alpha$ and $\\lambda$ ." + ], + "image_footnote": [], + "bbox": [ + 73, + 428, + 357, + 551 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/be9a387bddeba4b58e166a6984c8c5b0867e9a4c5e45612fe0aa5451666328af.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 367, + 426, + 638, + 550 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/050be5835958eb54c08c32fca9f8350415aab027b5d014443ebe81b11f308c55.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 651, + 429, + 923, + 550 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "is the model's inferior performance on Twitter-2017 and Political Twitter datasets compared to the baseline(w/o IRG). However, an unexpected performance improvement emerges in Twitter-2015, surpassing even the configuration retaining aesthetic captions as input. This phenomenon may be attributed to dataset-specific characteristics in sample distribution. 
As detailed in Table 3, Twitter-2015 exhibits a significantly higher proportion of neutral-class samples compared to Twitter-2017 and Political Twitter. When the Chimera model is deprived of its reasoning abilities for both semantic and impression rationales (\"w/o SRG & IRG\"), its performance on sentiment classification declines to the lowest levels across all datasets. Specifically, a consistent reduction of approximately $4 - 5\\%$ is observed in nearly all metrics, underscoring the essential role of rationale-based reasoning in enhancing the effectiveness and accuracy of sentiment analysis tasks. These results show that the influence of rationale reasoning differs across datasets. For Twitter-2017, with its balanced sentiment distribution (see Table 3), impression rationale has a greater impact on sentiment analysis. In contrast, both semantic and impression rationales contribute to the other two datasets, but neither is dominant.", + "bbox": [ + 71, + 603, + 491, + 938 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The LSA branch plays a pivotal role in the Chimera model by bridging the semantic gap between textual and visual modalities, ensuring effective alignment of information across visual and textual data. Its removal (w/o LSA) consistently leads to a significant decline in performance across all datasets, as evident in the ablation study. For instance, on Twitter-2015, the accuracy drops from $81.61\\%$ to $80.54\\%$ , and the F1 score decreases from $77.98\\%$ to $77.03\\%$ . Similarly, for Twitter-2017, accuracy, and F1 score dropped to $73.72\\%$ and $70.96\\%$ , respectively. 
By aligning linguistic and visual features, the branch allows the model to effectively interpret semantic overlaps and contrasts, enabling more accurate sentiment predictions.", + "bbox": [ + 501, + 619, + 924, + 809 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Object-level descriptions (e.g., facial expressions and object-level aesthetic captions) enrich the input sequence by providing object-level detailed visual context. The ablation study reveals that removing OD (w/o OD) causes noticeable performance drops. On Twitter-2015, accuracy drops by 1.65 percentage points, and the F1 score decreases by 1.90 percentage points. Similarly, on Twitter-2017, accuracy is reduced by 2.56 percentage points, while the F1 score drops by 3.74 percentage points. Without the OD, the model", + "bbox": [ + 501, + 811, + 924, + 944 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025", + "bbox": [ + 73, + 32, + 517, + 44 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 906, + 32, + 919, + 42 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "loses access to these fine-grained visual features, leading to diminished interpretability and accuracy, particularly in datasets where visual information plays a crucial role in determining sentiment. Additionally, the aesthetic caption is excluded from the input sequence to assess its impact on performance (w/o Aes-cap). As demonstrated in Table 5, the absence of aesthetic features results in a noteworthy decline in performance across all datasets, particularly in the impression rationale generation (IRG) task. 
This leads to Chimera exhibiting the poorest sentiment classification performance for IRG on the Twitter-2017 and Political Twitter datasets, which underscore the importance of aesthetic captions in guiding the model to generate coherent and emotionally nuanced impressions.", + "bbox": [ + 71, + 53, + 491, + 258 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.6 Hyper-parameter Analysis", + "text_level": 1, + "bbox": [ + 71, + 272, + 307, + 286 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We conduct a hyperparameter analysis to explore the impact of $\\alpha$ and $\\lambda$ on the Chimera model's performance across the Twitter-2015, Twitter-2017, and Political Twitter datasets. Hyperparameter $\\alpha$ regulates the balance between sentiment classification (SC) and rationale generation components (semantic and impression rationales, SRG, and IRG), while $\\lambda$ controls the weight of patch-token alignment within the overall loss function. As shown in Figure 2, for all datasets, a lower $\\alpha$ , which assigns greater weight to rationale generation, generally improves model performance, with values around 0.1 to 0.2 achieving the highest accuracy and F1 scores. This emphasizes the significance of integrating semantic and impression rationales in MASC. As $\\alpha$ increases, favoring SC loss, performance plateaus or declines, particularly for the Political Twitter dataset, indicating that reduced emphasis on rationale generation diminishes the model's ability to capture fine-grained sentiment context effectively. Moreover, the results indicate that increasing $\\lambda$ initially enhances model performance, with diminishing returns beyond a certain threshold. For the Twitter-2015 and Political Twitter datasets, moderate $\\lambda$ values [0.2, 0.5] achieve optimal accuracy and F1 scores, while higher values ( $\\lambda > 0.6$ ) lead to performance stabilization or slight decline. 
This observation indicates that balanced alignment between visual and textual features enhances the model's interpretability and accuracy and excessively high $\\lambda$ values may negatively impact performance, likely due to overemphasis on alignment at the expense of core sentiment classification. For Twitter-2017, a similar trend is observed, although performance variations are less pronounced.", + "bbox": [ + 71, + 290, + 491, + 729 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "5 IN-DEPTH ANALYSIS", + "text_level": 1, + "bbox": [ + 73, + 744, + 269, + 758 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "5.1 Quality Analysis of Rationale", + "text_level": 1, + "bbox": [ + 71, + 763, + 330, + 777 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Table 6 provides an evaluation of the sentiment rationale quality for both the ground-truth and Chimera-generated content, aiming to analyze their impact on sentiment analysis. A pre-trained sentiment classification model [92] is employed to assess the intuitive sentiment quality of these rationales across three test datasets by inputting the rationales into the model and analyzing the sentiment predictions. For both SR and IR, the results in the GroundTruth row represent the upper performance bound. It is evident that the ground truth performance for SR significantly exceeds that of IR, indicating that semantic rationales", + "bbox": [ + 71, + 781, + 490, + 941 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/393ca141cdf3ad35aa86d194b22a9471891c0a558d4751aa6909446c174e5b0a.jpg", + "table_caption": [ + "TABLE 6 The evaluation results $(\\%)$ of rationale quality. The best-performing results highlighted in bold." + ], + "table_footnote": [], + "table_body": "
Rationale SourceTwitter-2015Twitter-2017Political
AccF1AccF1AccF1
Semantic Rationale
Ground-Truth99.0499.0498.5498.5497.6497.64
Chimera80.9180.8375.0474.9370.2070.14
Impression Rationale
Ground-Truth69.9169.9072.7772.7176.876.87
Chimera63.4563.6561.6759.3860.5460.12
", + "bbox": [ + 506, + 103, + 919, + 247 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/f035b5be188bc560f8e6ccdde0472213968c026b15af35b35081f552ab580ea2.jpg", + "image_caption": [ + "Fig. 3. Human evaluation of factuality, clarity and fluency for SR and IR." + ], + "image_footnote": [], + "bbox": [ + 506, + 263, + 919, + 419 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "are more critical for this task than impression rationales. We hypothesize that two factors contribute to this discrepancy. Firstly, as illustrated in Table 3, semantic rationales are shorter in length and straightforward, facilitating easy comprehension, while the emotions elicited by images are inherently more abstract and multifaceted. Secondly, the IR's reliance on visual cues contrasts sharply with the Twitter dataset's text-centric sentiment distribution. Prior research has shown that a considerable majority of targets (around $58\\%$ ) are absent from images [13], and most targets (93% in Twitter-2015) exhibit emotional coherence with their textual counterparts [93]. This misalignment underscores the dataset's limitations in evaluating IRs and necessitates a nuanced understanding of the interplay between visual and textual sentiment representations.", + "bbox": [ + 501, + 474, + 921, + 691 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A total of 180 samples were randomly selected for human evaluation, with 100 samples drawn from the training set, 40 from the testing set, and 40 from the validation set of both the Twitter-2015 and Twitter-2017 datasets. 
Four native English speakers with Master's degrees in the arts were recruited to assess the quality of the rationale data based on three criteria: (1) factuality, evaluating whether the rationale is grounded in accurate and verifiable information; (2) clarity, assessing the logical structure and comprehensibility of the rationale; and (3) fluency, measuring the grammatical accuracy and smoothness of the language used. The Fleiss' Kappa $(\\kappa)$ values for the initial evaluation across the four raters were as follows: factuality $\\kappa = 0.922$ , clarity $\\kappa = 0.945$ , and fluency $\\kappa = 0.960$ . In cases of disagreement, the evaluators engaged in discussions to reach a consensus.", + "bbox": [ + 501, + 694, + 921, + 912 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Figure 3 presents the results of the human evaluation. It can be observed that SR consistently exhibits higher quality", + "bbox": [ + 503, + 912, + 923, + 943 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025", + "bbox": [ + 73, + 32, + 517, + 44 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/d0fccd6c9c3806a0763a2979525d5a81dbcd338f8180464516628387f00a7ac6.jpg", + "image_caption": [ + "Fig. 4. Assessment of sentiment intensity for SR and IR in both ground truth data and Chimera-generated content." + ], + "image_footnote": [], + "bbox": [ + 71, + 51, + 924, + 294 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "across all metrics, which verifies that the employed LLM is capable of generating appropriate rationale data for specific tasks when provided with concrete ground-truth labels. In comparison to SR, IR demands a more in-depth understanding of visual content and is inherently more subjective. 
Consequently, IR is more prone to issues of factuality and clarity, as interpreting the abstract aesthetic and emotional elements conveyed by an image often involves subjective reasoning, which may lead to misalignment with objective ground truths or human expectations.", + "bbox": [ + 71, + 343, + 493, + 491 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "5.2 Quantitative Analysis of Rationale", + "text_level": 1, + "bbox": [ + 71, + 501, + 369, + 516 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We conduct a quantitative analysis on the test sets of ground truth and Chimera-generated content to examine the impact of varying levels of sentiment intensity in cognitive rationales on the accuracy of sentiment prediction, including their potential to amplify or diminish predictive performance. As illustrated in Figure 4, the sentiment intensity distributions of Twitter-2015 and Twitter-2017 reveal distinct patterns. Specifically, the sentiment intensity of IR demonstrates a noticeable bias toward positive values, whereas the sentiment intensity of SR aligns more closely with the sentiment polarity label distribution presented in Table 3.", + "bbox": [ + 71, + 518, + 490, + 680 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "This observation suggests that IR demonstrates a bias toward positive samples, increasing the model's confidence in predicting positive instances. While this bias may be beneficial for datasets with a higher proportion of positive samples (e.g., Twitter-2017), it could lead to additional bias in datasets with a limited representation of positive samples. This finding is further corroborated by the ablation study results, which reveal that the performance of the Chimera model without IR is worse on Twitter-2017 compared to its performance on Twitter-2015. 
Another notable observation is that, for the ground truth of the Political Twitter dataset, the sentiment intensity distribution of IR is relatively uniform across all ranges. In contrast, the Chimera-generated content for IR exhibits a more distinguishable sentiment intensity distribution compared to the ground truth, which further validates the quality of SR, the effectiveness of the proposed Chimera training paradigm, and the robustness of Chimera's performance.", + "bbox": [ + 71, + 680, + 491, + 944 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/3acadf58ed5ee68a3128615d89f084eeca07b2f9bf82d1cd486f6dc56fa12528.jpg", + "image_caption": [ + "Fig. 5. Visualization of the top 15 most frequent aesthetic-related words in generated IR." + ], + "image_footnote": [], + "bbox": [ + 506, + 343, + 924, + 523 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "5.3 Impact of Aesthetic Attributes on Sentiment", + "text_level": 1, + "bbox": [ + 503, + 587, + 872, + 603 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To investigate the impact of image aesthetic attributes on sentiment analysis, we visualize the frequency of aesthetic-related words within the impression rationales generated by our proposed Chimera model and its variant \"Chimera w/o Aes-cap\" on the Twitter-2015 and Twitter-2017 test sets. Specifically, we visualize the top 15 most frequent aesthetic-related words within the generated IR, based on the aesthetic attributes defined by Milena et al. [94]. As shown in Figure 5, the frequency analysis of aesthetic-related words for Chimera on Twitter-2015 and Twitter-2017 reveals that \"visual,\" \"vibrant,\" \"focus,\" and \"design\" prominently appear across both datasets. These terms, associated with visual clarity, expressive quality, image composition, and cohesiveness, align with the model's improved accuracy and F1 scores. 
However, excluding the aesthetic caption from the input results in subtle shifts in the frequency distribution of these aesthetic-related terms. For Twitter-2015, the overall frequency distribution of aesthetic-related terms shows minimal change, with a slight increase in \"focus\" and a decrease in \"vibrant\". In contrast, for Twitter-2017, \"focus\" shows a significant increase, while \"visual\" and \"vibrant\" decrease notably. Combined with the ablation study results in Table 5, the absence of aesthetic captions in the input", + "bbox": [ + 501, + 606, + 924, + 944 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025", + "bbox": [ + 73, + 32, + 517, + 44 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/76169821fecec912e1e9076d4013b612fbcbf9e394366e9d11fb88f543258d70.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Input image-text pair & Auxiliary sentencesMethodSemantic Rationale (SR of GT, Chimera and ChatGPT-4o) Reasoning Cause(RC of MDCA)Impression Rationale (IR of GT and Chimera)/ Direct Cause(DC of MDCA)Sentiment
RT @ MamiHerald : [Joanne Stiger]POS and Melissa Keller are the first same - sex couple to be married in Broward :Ground Truth (GT)Based on the image-text pair, the sentiment towards Joanne Stiger in the given text and image is positive, because the text highlights a historic and celebratory event of her marriage, and the image shows people smiling and appearing joyful, indicating happiness and approval.The image emanates a joyous and celebratory atmosphere, highlighted by the smiling faces and relaxed postures of the individuals present. The presence of photographers and the gathering of people around the couple enhance the sense of an important and positive milestone being reached. Overall, the visual conveys happiness, love, and a significant moment being warmly embraced by all involved.Positive
ChimeraThe sentiment towards Joanne Stiger in the given text and image is positive, because the text highlights her as the first same-sex couple to be married in Broward, and the image shows her smiling and looking happy, which conveys a sense of pride and joy.The image depicts two individuals who appear relaxed and happy, with one person appearing content and relaxed. The overall impression is one of happiness and contentment. The setting and body language suggest a sense of happiness, together with a pleasant and celebratory atmosphere.Positive
Aesthetic caption: The body language of the two people shows that they are in a very different way. The background shows a room with a lot of people and the cameraman is in the background.MDCABecause the tweet simply reports on a factual event without expressing any emotional response towards Joanne Stiger. The use of the word "first" suggests that the author is excited about the news and is highlighting the fact that Stiger and Keller are the first same-sex couple to be married in Broward. Overall, the tweet does not provide enough information to determine a positive or negative sentiment towards JoAnn Stiger herself.Because joanne stiger and Melissa keller are the first same - sex couple to be married in bronward.Neutral
GPT-4oBased on the image-text pair, the sentiment towards Joanne Stiger in the given text and image is Positive, because both the faces of Joanne Stiger and Melissa Keller, as well as the individuals around them, display expressions of happiness and joy, indicating a celebratory and momentous occasion.Positive
Primary Football Cup . [St Bede]' s v St Augustines friendly . . . both out but now for the friendly # sportsmanshipGround TruthBased on the image-text pair, the sentiment towards St Bede in the given text and image is neutral, because the text describes the event as a friendly match with a focus on sportsmanship, and the image shows a regular football game without any emotional or evaluative expressions towards St Bede.The image depicts a friendly football match between St Bede's and St Augustine's on a lush green field under a partly cloudy sky. The overall scene conveys a sense of camaraderie and sportsmanship, aligning well with the neutral sentiment label. The focus appears to be on enjoying the game and fostering friendly competition in a relaxed atmosphere.Neutral
ChimeraThe sentiment towards St Bede in the given text and image is neutral, because the text simply states that St Bede is participating in a friendly match without expressing any positive or negative emotions, and the image shows a standard football match scene without any clear emotional cues.The image of the St Bede vs. St Augustines football match conveys a sense of camaraderie and sportsmanship. The players are engaged in a moment of play, suggesting a positive and supportive atmosphere. The neutral sentiment label aligns with the overall feeling of cambered athleticism and teamwork, without evoking strong emotions.Neutral
Aesthetic caption: The position of the camera captures the players in action, as they are engaged in a game of soccer. the position of the camera captures the players in action, and the position of the camera captures the players in action.MDCABecause the writer is promoting the team's friendly football match with St. Albers, which suggests that they are a fan of the team. The use of the hashtag #sportsmanship also suggests that the writer has a positive opinion of St. Bede. Overall, the language and tone of the tweet suggest that the sentiment towards St. Bede is positive.It is a friendly football cup.Positive
GPT-4oBased on the image-text pair, the sentiment towards St Bede in the given text and image is Positive, because the text highlights a friendly and sportsmanlike attitude despite both teams being out of the competition, and the image shows players engaging in the match, fostering a positive environment.Positive
[Michael Oer]NEG is latest reminder that NFL pain lasts 24 / 7 / 365Ground TruthBased on the image-text pair, the sentiment towards Michael Oer in the given text and image is negative, because the text highlights the constant and enduring pain associated with his NFL career, implying a prolonged and difficult experience.The image of Michael Oer in his NFL uniform, combined with the sentiment label "negative", conveys a sense of enduring struggle and fatigue. His posture and facial expression suggest a feeling of exhaustion or frustration, which aligns with the text's implication that the pain and challenges of an NFL career are relentless and unforgiving. The overall impression is one of continuous physical and emotional burden.Negative
ChimeraThe sentiment towards Michael Oer in the given text and image is negative, because the text highlights that his recent reminder that NFL pain lasts 24/7/365, which is a serious and distressing reminder.The image of Michael Oer exudes a sense of determination and focus, aligning with the negative sentiment label. His serious expression and the context of NFL pain underscore a feeling of exhaustion and exhaustion, which complements the idea of prolonged pain and the accompanying reminder that "no matter how much you love it," evoking a negative feeling.Negative
MDCABecause the tweet does not express any clear positive or negative opinion towards him. The tweet simply states that he is a reminder that NFL pain lasts for a specific time period. There is no indication of any emotional attachment or opinion towards Michael Oer in the tweet, and the language used is informative rather than emotional. Therefore, the sentiment towards Michael Oer in this tweet is neutral.He is reminder that NFL pain lasts 24 / 7 / 365.Neutral
GPT-4oBased on the image-text pair, the sentiment towards Michael Oer in the given text and image is Negative, because the text highlights the unending pain associated with NFL careers, using Michael Oer as an example, and alludes to the physical and possibly emotional toll he endures.Negative
", + "bbox": [ + 96, + 54, + 901, + 694 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Fig. 6. Three examples showcasing the predictions generated by Chimera, MDCA, and GPT-4o are presented for analysis. During the evaluation process, GPT-4o exclusively produces the semantic rationale (SR). The input image-text pair and auxiliary sentences are utilized solely by Chimera. For MDCA, the reasoning cause (RC), direct cause (DC), and sentiment prediction are derived through direct inference.", + "bbox": [ + 71, + 709, + 923, + 746 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "leads to the worst sentiment analysis performance across all datasets on IRG. This highlights the critical role of aesthetic captions in enhancing the model's understanding of image aesthetics, particularly in datasets like Twitter-2017 with balanced sentiment distributions. Specifically, attributes such as \"visual\" and \"vibrant\" positively contribute to sentiment analysis performance, whereas \"focus\" appears to significantly impair it. We speculate that since \"focus\" emphasizes specific image elements, potentially leads to an unbalanced interpretation of visual content. This localized emphasis can narrow the model's analytical scope, prioritizing details at", + "bbox": [ + 71, + 770, + 491, + 931 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "the expense of broader context and compositional harmony. Consequently, the model may struggle to capture holistic aesthetic and emotional cues essential for accurate sentiment classification.", + "bbox": [ + 503, + 770, + 921, + 829 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5.4 Comparison with Large Language Models", + "text_level": 1, + "bbox": [ + 504, + 849, + 856, + 864 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We evaluate the performance of GPT-4o on the MASC task under a zero-shot setting. 
As shown in Table 7, GPT-4o achieves an accuracy of $46.87\\%$ and an F1 score of $47.47\\%$ , which is substantially lower than Chimera, which reports $81.61\\%$ accuracy and $77.98\\%$ F1 score. On the Twitter-2017", + "bbox": [ + 503, + 869, + 923, + 941 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025", + "bbox": [ + 73, + 32, + 517, + 44 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/f8457c3112501699ff84b3cfc411209dbad50c14d2e6528c484b1697dae5c507.jpg", + "table_caption": [ + "TABLE 7 The experimental results $(\\%)$ of GPT-4o on the MASC task under a zero-shot setting are presented. The best-performing results highlighted in bold. The term \"dis\" refers to the percentage of samples where the sentiment polarity associated with a specific aspect cannot be discerned." + ], + "table_footnote": [], + "table_body": "
MethodTwitter-2015Twitter-2017
AccF1DisAccF1Dis
Chimera81.6177.98-75.6274.59-
GPT-4o46.8747.470.256.0853.280.5
GPT-4o w/o image67.0262.38-59.6460.35-
", + "bbox": [ + 76, + 135, + 486, + 229 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "dataset, GPT-4o shows an improvement with an accuracy of $56.08\\%$ and an F1 score of $53.28\\%$ . However, this performance still trails behind Chimera, which reports $75.62\\%$ accuracy and $74.59\\%$ F1 score. Surprisingly, removing the image input results in an improvement in the model's accuracy and F1 score, reaching $67.02\\%$ and $62.38\\%$ on the Twitter-2015 dataset, respectively. This observation contrasts sharply with the phenomenon observed in the baseline model. Similarly, in the Twitter-2017 dataset, the performance of GPT-4o without image input is slightly better than with the image input. We speculate that in task-specific models, incorporating image data typically improves sentiment classification performance, as these models are finetuned to leverage multi-modal inputs effectively. However, in a zero-shot setting, GPT-4o operates based on its general pre-trained knowledge, which may not be fully optimized for combining textual and visual inputs for sentiment analysis. In this setting, adding image input may introduce noise rather than meaningful information. Moreover, GPT-4o has a low Dis value on both datasets, which slightly decreases to 0 when the image input is removed. This further suggests that the model's ability to distinguish sentiment polarity is, to a certain extent, influenced by the inclusion of the visual modality.", + "bbox": [ + 76, + 273, + 488, + 622 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "5.5 Case Study", + "text_level": 1, + "bbox": [ + 76, + 645, + 199, + 660 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "An additional case study is performed to provide a more comprehensive evaluation of the effectiveness of the proposed Chimera model. Figure 6 illustrates three representative examples, each corresponding to positive, neutral, and negative samples, respectively. 
As illustrated in the first example, MDCA is the sole model to predict \"Neutral\" for the target \"Joanne Stiger,\" whereas the other three models accurately predict \"Positive\". This result is primarily due to the RC and DC generated by MDCA, which lack the expression of positive or negative sentiment. Notably, the RC predominantly emphasizes the textual content, overlooking the joyful atmosphere conveyed through the image. In the second example, an intriguing observation is that the situation is the exact opposite of the previous case. Here, only Chimera correctly predicts the sentiment polarity of the specific target, \"St. Bede\" as \"Neutral\" whereas both GPT-4o and MDCA incorrectly classify it as \"Positive\". It is observed that the SR of GPT-4o and the RC of MDCA both convey a positive sentiment, largely due", + "bbox": [ + 76, + 665, + 488, + 941 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "to an overinterpretation and extrapolation of the textual content. In contrast, Chimera demonstrates accurate prediction by appropriately integrating a balanced understanding of the image content and its aesthetic attributes. In the final example, both Chimera and GPT-4o accurately identify the sentiment polarity of \"Michael Oher\" as \"Negative\". MDCA's incorrect prediction of \"Neutral\" may be attributed to its generated RC and DC failing to account for the individual's expression, thereby overlooking critical semantic cues present in the visual content. With the aid of facial descriptions, Chimera effectively captures and aligns fine-grained emotional cues from visual content, enabling it to generate coherent SR and IR and achieve accurate predictions. 
The above representative instances further verify that incorporating cognitive and aesthetic sentiment causality enhances sentiment classification accuracy in MABSA.", + "bbox": [ + 506, + 54, + 919, + 286 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "6 CONCLUSION", + "text_level": 1, + "bbox": [ + 508, + 311, + 643, + 325 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In this paper, we propose a cognitive sentiment causality understanding framework tailored for multimodal aspect-based sentiment classification. The framework, which is novel in its approach, consists of four primary components: linguistic-aware semantic alignment, a translation module, rationale dataset construction, and rationale-aware learning. The linguistic-aware semantic alignment component facilitates visual patch-token level alignment through dynamic patch selection and semantic patch calibration. The translation module transforms holistic image and object-level visual information into corresponding emotion-laden textual representations. The rationale dataset construction involves designing refined prompts and leveraging LLMs to generate semantic and impression rationale. Finally, rationale-aware learning incorporates semantic explanations and affective-cognitive resonance to enhance the model's capacity to understand cognitive sentiment causality. Experimental results on three Twitter datasets demonstrate that the proposed Chimera achieves performance gains over SOTA baselines.", + "bbox": [ + 506, + 333, + 919, + 609 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 508, + 635, + 679, + 648 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "This research is supported by the Shanghai Science and Technology Innovation Action Plan (No. 24YF2710100), the Shanghai Special Project to Promote High-quality Industrial Development (No. 
RZ-CYAI-01-24-0288), the National Nature Science Foundation of China (No. 62477010), the Science and Technology Commission of Shanghai Municipality Grant (No. 22511105901, No. 21511100402), the Ministry of Education, Singapore under its MOE Academic Research Fund Tier 2 (STEM RIE2025 Award MOE-T2EP20123-0005) and by the RIE2025 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) (Award I2301E0026), administered by A\\*STAR, as well as supported by Alibaba Group and NTU Singapore.", + "bbox": [ + 506, + 657, + 919, + 845 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 508, + 871, + 617, + 885 + ], + "page_idx": 14 + }, + { + "type": "ref_text", + "text": "[1] R. Mao, Q. Liu, K. He, W. Li, and E. Cambria, \"The biases of pretrained language models: An empirical study on prompt-based sentiment analysis and emotion detection,\" IEEE Transactions on Affective Computing, vol. 14, no. 3, pp. 1743-1753, 2023.", + "bbox": [ + 508, + 895, + 921, + 941 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025", + "bbox": [ + 76, + 32, + 517, + 42 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[2] K. Du, F. Xing, R. Mao, and E. Cambria, \"Financial sentiment analysis: Techniques and applications,\" ACM Computing Surveys, vol. 56, no. 9, pp. 1-42, 2024.", + "[3] R. Mao, M. Ge, S. Han, W. Li, K. He, L. Zhu, and E. Cambria, \"A survey on pragmatic processing techniques,\" Information Fusion, vol. 114, p. 102712, 2025.", + "[4] L. Xiao, Y. Xue, H. Wang, X. Hu, D. Gu, and Y. Zhu, \"Exploring fine-grained syntactic information for aspect-based sentiment classification with dual graph neural networks,\" Neurocomputing, vol. 471, pp. 48-59, 2022.", + "[5] Y. Ma, R. Mao, Q. Lin, P. 
Wu, and E. Cambria, \"Quantitative stock portfolio optimization by multi-task learning risk and return,\" Information Fusion, vol. 104, p. 102165, 2024.", + "[6] K. Du, F. Xing, R. Mao, and E. Cambria, \"FinSenticNet: A concept-level lexicon for financial sentiment analysis,\" in 2023 IEEE Symposium Series on Computational Intelligence (SSCI). IEEE, 2023, pp. 109-114.", + "[7] X. Zhang, R. Mao, and E. Cambria, \"SenticVec: Toward robust and human-centric neurosymbolic sentiment analysis,\" in Findings of the Association for Computational Linguistics: ACL. Association for Computational Linguistics, 2024, pp. 4851-4863.", + "[8] S. Zhao, M. Jia, L. A. Tuan, F. Pan, and J. Wen, \"Universal vulnerabilities in large language models: Backdoor attacks for incontext learning,\" in Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, 2024, pp. 11507-11522.", + "[9] L. Zhu, R. Mao, E. Cambria, and B. J. Jansen, \"Neurosymbolic AI for personalized sentiment analysis,\" in Proceedings of International Conference on Human-Computer Interaction (HCII), 2024, pp. 269-290.", + "[10] S. Zhao, J. Wen, A. Luu, J. Zhao, and J. Fu, \"Prompt as triggers for backdoor attack: Examining the vulnerability in language models,\" in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, 2023, pp. 12303-12317.", + "[11] J. YU and J. JIANG, \"Adapting bert for target-oriented multimodal sentiment classification.(2019),\" in Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, 2019, pp. 5408-5414.", + "[12] J. Yu, J. Jiang, and R. Xia, \"Entity-sensitive attention and fusion network for entity-level multimodal sentiment classification,\" IEEE/ACM Transactions on Audio, Speech, and Language Processing, vol. 28, pp. 429-439, 2019.", + "[13] J. Yu, J. Wang, R. Xia, and J. 
Li, \"Targeted multimodal sentiment classification based on coarse-to-fine grained image-target matching.\" in *IJCAI*, 2022, pp. 4482-4488.", + "[14] Y. Ling, J. Yu, and R. Xia, \"Vision-language pre-training for multimodal aspect-based sentiment analysis,\" in Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2022, pp. 2149-2159.", + "[15] L. Yang, J.-C. Na, and J. Yu, \"Cross-modal multitask transformer for end-to-end multimodal aspect-based sentiment analysis,\" Information Processing & Management, vol. 59, no. 5, p. 103038, 2022.", + "[16] R. Zhou, W. Guo, X. Liu, S. Yu, Y. Zhang, and X. Yuan, \"Aom: Detecting aspect-oriented information for multimodal aspect-based sentiment analysis,\" in Findings of the Association for Computational Linguistics: ACL 2023, 2023, pp. 8184-8196.", + "[17] Z. Khan and Y. Fu, \"Exploiting bert for multimodal target sentiment classification through input space translation,\" in Proceedings of the 29th ACM international conference on multimedia, 2021, pp. 3034-3042.", + "[18] L. Xiao, E. Zhou, X. Wu, S. Yang, T. Ma, and L. He, \"Adaptive multi-feature extraction graph convolutional networks for multimodal target sentiment analysis,\" in 2022 IEEE International Conference on Multimedia and Expo (ICME). IEEE, 2022, pp. 1-6.", + "[19] L. Xiao, X. Wu, S. Yang, J. Xu, J. Zhou, and L. He, \"Cross-modal fine-grained alignment and fusion network for multimodal aspect-based sentiment analysis,\" Information Processing & Management, vol. 60, no. 6, p. 103508, 2023.", + "[20] Y. Huang, Z. Chen, J. Chen, J. Z. Pan, Z. Yao, and W. Zhang, \"Target-oriented sentiment classification with sequential cross-modal semantic graph,\" in International Conference on Artificial Neural Networks. Springer, 2023, pp. 587-599.", + "[21] Q. Wang, H. Xu, Z. Wen, B. Liang, M. Yang, B. Qin, and R. 
Xu, \"Image-to-text conversion and aspect-oriented filtration for multimodal aspect-based sentiment analysis,\" IEEE Transactions on Affective Computing, 2023.", + "[22] L. Xiao, X. Wu, J. Xu, W. Li, C. Jin, and L. He, \"Atlantis: Aesthetic-oriented multiple granularities fusion network for joint multi-" + ], + "bbox": [ + 76, + 54, + 491, + 941 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "modal aspect-based sentiment analysis,\" Information Fusion, vol. 106, p. 102304, 2024.", + "[23] H. Yang, Y. Zhao, and B. Qin, \"Face-sensitive image-to-emotional-text cross-modal translation for multimodal aspect-based sentiment analysis,\" in Proceedings of the 2022 conference on empirical methods in natural language processing, 2022, pp. 3324-3335.", + "[24] R. Fan, T. He, M. Chen, M. Zhang, X. Tu, and M. Dong, \"Dual causes generation assisted model for multimodal aspect-based sentiment classification,\" IEEE Transactions on Neural Networks and Learning Systems, 2024.", + "[25] J. Wang, Z. Li, J. Yu, L. Yang, and R. Xia, \"Fine-grained multimodal named entity recognition and grounding with a generative framework,\" in Proceedings of the 31st ACM International Conference on Multimedia, 2023, pp. 3934-3943.", + "[26] X. Zhang, R. Mao, K. He, and E. Cambria, \"Neurosymbolic sentiment analysis with dynamic word sense disambiguation,\" in Findings of the Association for Computational Linguistics: EMNLP 2023, Singapore, 2023, pp. 8772-8783.", + "[27] Q. Lu, X. Sun, Y. Long, Z. Gao, J. Feng, and T. Sun, \"Sentiment analysis: Comprehensive reviews, recent advances, and open challenges,\" IEEE Transactions on Neural Networks and Learning Systems, 2023.", + "[28] H. Liu, W. Wang, and H. Li, \"Interpretable multimodal misinformation detection with logic reasoning,\" in Findings of the Association for Computational Linguistics: ACL 2023, 2023, pp. 9781-9796.", + "[29] R. Mao, K. Du, Y. Ma, L. Zhu, and E. 
Cambria, \"Discovering the cognition behind language: Financial metaphor analysis with MetaPro,\" in 2023 IEEE International Conference on Data Mining (ICDM). IEEE, 2023, pp. 1211-1216.", + "[30] E. Cambria, X. Zhang, R. Mao, M. Chen, and K. Kwok, \"SenticNet 8: Fusing emotion AI and commonsense AI for interpretable, trustworthy, and explainable affective computing,\" in Proceedings of International Conference on Human-Computer Interaction (HCI), Washington DC, USA, 2024, pp. 197-216.", + "[31] K. Du, R. Mao, F. Xing, and E. Cambria, \"Explainable stock price movement prediction using contrastive learning,\" in Proceedings of the 33rd ACM International Conference on Information and Knowledge Management (CIKM), Idaho, USA, 2024, pp. 529-537.", + "[32] H. Zhang, X. Zhou, Z. Shen, and Y. Li, \"Privfr: Privacy-enhanced federated recommendation with shared hash embedding,\" IEEE Transactions on Neural Networks and Learning Systems, 2024.", + "[33] E. Yang, L. Shen, G. Guo, X. Wang, X. Cao, J. Zhang, and D. Tao, \"Model merging in llms, mllms, and beyond: Methods, theories, applications and opportunities,\" arXiv preprint arXiv:2408.07666, 2024.", + "[34] L. Xiao, R. Mao, X. Zhang, L. He, and E. Cambria, \"Vanessa: Visual connotation and aesthetic attributes understanding network for multimodal aspect-based sentiment analysis,\" in Findings of the Association for Computational Linguistics: EMNLP 2024, 2024, pp. 11486-11500.", + "[35] J. Kruk, J. Lubin, K. Sikka, X. Lin, D. Jurafsky, and A. Divakaran, \"Integrating text and image: Determining multimodal document intent in instagram posts,\" in Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), 2019, pp. 4622-4632.", + "[36] H. Liu, W. Wang, and H. 
Li, \"Towards multi-modal sarcasm detection via hierarchical congruity modeling with knowledge enhancement,\" in Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, 2022, pp. 4995-5006.", + "[37] R. Mao and X. Li, \"Bridging towers of multi-task learning with a gating mechanism for aspect-based sentiment analysis and sequential metaphor identification,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35, 2021, pp. 13534-13542.", + "[38] T. Yue, R. Mao, H. Wang, Z. Hu, and E. Cambria, \"KnowleNet: Knowledge fusion network for multimodal sarcasm detection,\" Information Fusion, vol. 100, p. 101921, 2023.", + "[39] C. Fan, J. Lin, R. Mao, and E. Cambria, \"Fusing pairwise modalities for emotion recognition in conversations,\" Information Fusion, vol. 106, p. 102306, 2024.", + "[40] L. Yang, Z. Wang, Z. Li, J.-C. Na, and J. Yu, \"An empirical study of multimodal entity-based sentiment analysis with chatgpt: Improving in-context learning via entity-aware contrastive learning,\" Information Processing & Management, vol. 61, no. 4, p. 103724, 2024.", + "[41] L. Yang, J. Wang, J.-C. Na, and J. Yu, \"Generating paraphrase sen" + ], + "bbox": [ + 506, + 55, + 921, + 941 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025", + "bbox": [ + 73, + 32, + 517, + 44 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "tences for multimodal entity-category-sentiment triple extraction,\" Knowledge-Based Systems, vol. 278, p. 110823, 2023.", + "[42] J. Zhou, J. Zhao, J. X. Huang, Q. V. Hu, and L. He, \"Masad: A large-scale dataset for multimodal aspect-based sentiment analysis,\" Neurocomputing, vol. 455, pp. 47-58, 2021.", + "[43] W. Zhang, X. Li, Y. Deng, L. Bing, and W. 
Lam, \"A survey on aspect-based sentiment analysis: Tasks, methods, and challenges,\" IEEE Transactions on Knowledge and Data Engineering, vol. 35, no. 11, pp. 11019-11038, 2022.", + "[44] X. Ju, D. Zhang, R. Xiao, J. Li, S. Li, M. Zhang, and G. Zhou, \"Joint multi-modal aspect-sentiment analysis with auxiliary cross-modal relation detection,\" in Proceedings of the 2021 conference on empirical methods in natural language processing, 2021, pp. 4395-4405.", + "[45] J. Mu, F. Nie, W. Wang, J. Xu, J. Zhang, and H. Liu, \"Mocolnet: A momentum contrastive learning network for multimodal aspect-level sentiment analysis,\" IEEE Transactions on Knowledge and Data Engineering, 2023.", + "[46] F. Zhao, C. Li, Z. Wu, Y. Ouyang, J. Zhang, and X. Dai, \"M2df: Multi-grained multi-curriculum denoising framework for multimodal aspect-based sentiment analysis,\" in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, 2023, pp. 9057-9070.", + "[47] E. Cambria, R. Mao, M. Chen, Z. Wang, and S.-B. Ho, \"Seven pillars for the future of artificial intelligence,\" IEEE Intelligent Systems, vol. 38, no. 6, pp. 62-69, 2023.", + "[48] R. Arnheim, Art and visual perception: A psychology of the creative eye. Univ of California Press, 1954.", + "[49] V. S. Ramachandran and W. Hirstein, \"The science of art: A neurological theory of aesthetic experience,\" Journal of Consciousness Studies, vol. 6, no. 6-7, pp. 15-51, 1999.", + "[50] H. Zeng, Z. Cao, L. Zhang, and A. C. Bovik, \"A unified probabilistic formulation of image aesthetic assessment,\" IEEE Transactions on Image Processing, vol. 29, pp. 1548-1561, 2019.", + "[51] G. C. Cupchik and J. László, Emerging visions of the aesthetic process: In psychology, semiology, and philosophy. Cambridge University Press, 1992.", + "[52] X. Jin, L. Wu, G. Zhao, X. Li, X. Zhang, S. Ge, D. Zou, B. Zhou, and X. 
Zhou, \"Aesthetic attributes assessment of images,\" in Proceedings of the 27th ACM international conference on multimedia, 2019, pp. 311-319.", + "[53] J. Ke, K. Ye, J. Yu, Y. Wu, P. Milanfar, and F. Yang, \"Vila: Learning image aesthetics from user comments with vision-language pretraining,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 10041-10051.", + "[54] J. Kruk, C. Ziems, and D. Yang, \"Impressions: Visual semiotics and aesthetic impact understanding,\" in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, 2023, pp. 12273-12291.", + "[55] R. Anil, A. M. Dai, O. First, M. Johnson, D. Lepikhin, A. Passos, S. Shakeri, E. Taropa, P. Bailey, Z. Chen et al., \"Palm 2 technical report,\" arXiv preprint arXiv:2305.10403, 2023.", + "[56] R. Mao, G. Chen, X. Zhang, F. Guerin, and E. Cambria, \"GPTEval: A survey on assessments of ChatGPT and GPT-4,\" in Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024). ELRA and ICCL, 2024, pp. 7844-7866.", + "[57] S. Zhao, L. A. Tuan, J. Fu, J. Wen, and W. Luo, \"Exploring clean label backdoor attacks and defense in language models,\" IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2024.", + "[58] S. Zhao, X. Xu, L. Xiao, J. Wen, and L. A. Tuan, \"Clean-label backdoor attack and defense: An examination of language model vulnerability,\" Expert Systems with Applications, vol. 265, p. 125856, 2025.", + "[59] J. Achiam, S. Adler, S. Agarwal, L. Ahmad, I. Akkaya, F. L. Aleman, D. Almeida, J. Altenschmidt, S. Altman, S. Anadkat et al., \"Gpt-4 technical report,\" arXiv preprint arXiv:2303.08774, 2023.", + "[60] G. Team, R. Anil, S. Borgeaud, J.-B. Alayrac, J. Yu, R. Soricut, J. Schalkwyk, A. M. Dai, A. Hauth, K. Millican et al., \"Gemini: a family of highly capable multimodal models,\" arXiv preprint arXiv:2312.11805, 2023.", + "[61] H. Touvron, L. 
Martin, K. Stone, P. Albert, A. Almahairi, Y. Babaei, N. Bashlykov, S. Batra, P. Bhargava, S. Bhosale et al., \"Llama 2: Open foundation and fine-tuned chat models,\" arXiv preprint arXiv:2307.09288, 2023." + ], + "bbox": [ + 75, + 54, + 491, + 941 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[62] H. Liu, W. Wang, H. Sun, A. Rocha, and H. Li, \"Robust domain misinformation detection via multi-modal feature alignment,\" IEEE Transactions on Information Forensics and Security, 2023.", + "[63] R. Mao, K. He, C. Ong, Q. Liu, and E. Cambria, “Metapro 2.0: Computational metaphor processing on the effectiveness of anomalous language modeling,” in Findings of the Association for Computational Linguistics ACL 2024, 2024, pp. 9891–9908.", + "[64] Z. Tan, D. Li, S. Wang, A. Beigi, B. Jiang, A. Bhattacharjee, M. Karami, J. Li, L. Cheng, and H. Liu, \"Large language models for data annotation: A survey,\" arXiv preprint arXiv:2402.13446, 2024.", + "[65] R. Mao, G. Chen, X. Li, M. Ge, and E. Cambria, \"A comparative analysis of metaphorical cognition in chatgpt and human minds,\" Cognitive Computation, vol. 17, no. 1, p. 35, 2025.", + "[66] Y. Jia, X. Wu, H. Li, Q. Zhang, Y. Hu, S. Zhao, and W. Fan, \"Uni-retrieval: A multi-style retrieval framework for stem's education,\" arXiv preprint arXiv:2502.05863, 2025.", + "[67] J. Wei, X. Wang, D. Schuurmans, M. Bosma, F. Xia, E. Chi, Q. V. Le, D. Zhou et al., \"Chain-of-thought prompting elicits reasoning in large language models,\" Advances in neural information processing systems, vol. 35, pp. 24824-24837, 2022.", + "[68] K. Cobbe, V. Kosaraju, M. Bavarian, M. Chen, H. Jun, L. Kaiser, M. Plappert, J. Tworek, J. Hilton, R. Nakano et al., \"Training verifiers to solve math word problems,\" arXiv preprint arXiv:2110.14168, 2021.", + "[69] P. Wang, A. Chan, F. Ilievski, M. Chen, and X. 
Ren, \"Pinto: Faithful language reasoning using prompt-generated rationales,\" in The Eleventh International Conference on Learning Representations, 2023.", + "[70] P. Wang, Z. Wang, Z. Li, Y. Gao, B. Yin, and X. Ren, \"Scott: Self-consistent chain-of-thought distillation,\" in Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2023, pp. 5546-5558.", + "[71] H. Liu, Z. Teng, L. Cui, C. Zhang, Q. Zhou, and Y. Zhang, \"Logicot: Logical chain-of-thought instruction tuning,\" in The 2023 Conference on Empirical Methods in Natural Language Processing, 2023.", + "[72] M. Kang, S. Lee, J. Baek, K. Kawaguchi, and S. J. Hwang, \"Knowledge-augmented reasoning distillation for small language models in knowledge-intensive tasks,\" Advances in Neural Information Processing Systems, vol. 36, 2024.", + "[73] Y. Li, A. Dao, W. Bao, Z. Tan, T. Chen, H. Liu, and Y. Kong, \"Facial affective behavior analysis with instruction tuning,\" in European Conference on Computer Vision. Springer, 2025, pp. 165-186.", + "[74] J. Guo, J. Deng, A. Lattas, and S. Zafeiriou, \"Sample and computation redistribution for efficient face detection,\" in International Conference on Learning Representations, 2021.", + "[75] S. Wegreffer, J. Hessel, S. Swayamdipta, M. Riedl, and Y. Choi, \"Reframing human-ai collaboration for generating free-text explanations,\" in Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 2022, pp. 632–658.", + "[76] L. Meng, H. Li, B.-C. Chen, S. Lan, Z. Wu, Y.-G. Jiang, and S.-N. Lim, \"Adavit: Adaptive vision transformers for efficient image recognition,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 12309-12318.", + "[77] Z. Fu, L. Zhang, H. Xia, and Z. 
Mao, \"Linguistic-aware patch slimming framework for fine-grained cross-modal alignment,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 26307-26316.", + "[78] C. Maddison, A. Mnih, and Y. Teh, \"The concrete distribution: A continuous relaxation of discrete random variables,\" in Proceedings of the international conference on Learning Representations. International Conference on Learning Representations, 2017.", + "[79] Z. Zong, K. Li, G. Song, Y. Wang, Y. Qiao, B. Leng, and Y. Liu, \"Self-slimmed vision transformer,\" in European Conference on Computer Vision. Springer, 2022, pp. 432-448.", + "[80] F. Faghri, D. J. Fleet, J. R. Kiros, and S. Fidler, \"Vse++: Improving visual-semantic embeddings with hard negatives,\" arXiv preprint arXiv:1707.05612, 2017.", + "[81] L. Yang, J. Yu, C. Zhang, and J.-C. Na, \"Fine-grained sentiment analysis of political tweets with entity-aware multimodal network,\" in Diversity, Divergence, Dialogue: 16th International Conference, iConference 2021, Beijing, China, March 17–31, 2021, Proceedings, Part I 16. Springer, 2021, pp. 411–420.", + "[82] H. W. Chung, L. Hou, S. Longpre, B. Zoph, Y. Tay, W. Fedus, Y. Li, X. Wang, M. Dehghani, S. Brahma et al., \"Scaling instructionfinetuned language models,\" Journal of Machine Learning Research, vol. 25, no. 70, pp. 1-53, 2024." + ], + "bbox": [ + 506, + 55, + 921, + 941 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025", + "bbox": [ + 73, + 32, + 517, + 44 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[83] I. Loshchilov, \"Decoupled weight decay regularization,\" arXiv preprint arXiv:1711.05101, 2017.", + "[84] K. He, X. Zhang, S. Ren, and J. 
Sun, \"Deep residual learning for image recognition,\" in Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 770-778.", + "[85] D. Tang, B. Qin, and T. Liu, \"Aspect level sentiment classification with deep memory network,\" in Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, 2016, pp. 214-224.", + "[86] F. Fan, Y. Feng, and D. Zhao, \"Multi-grained attention network for aspect-level sentiment classification,\" in Proceedings of the 2018 conference on empirical methods in natural language processing, 2018, pp. 3433-3442.", + "[87] J. D. M.-W. C. Kenton and L. K. Toutanova, \"Bert: Pre-training of deep bidirectional transformers for language understanding,\" in Proceedings of naacL-HLT, vol. 1. Minneapolis, Minnesota, 2019, p. 2.", + "[88] N. Xu, W. Mao, and G. Chen, \"Multi-interactive memory network for aspect based multimodal sentiment analysis,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 33, 2019, pp. 371-378.", + "[89] J. Yu, K. Chen, and R. Xia, \"Hierarchical interactive multimodal transformer for aspect-based multimodal sentiment analysis,\" IEEE Transactions on Affective Computing, vol. 14, no. 3, pp. 1966-1978, 2022.", + "[90] D. Liu, L. Li, X. Tao, J. Cui, and Q. Xie, \"Descriptive prompt paraphrasing for target-oriented multimodal sentiment classification,\" in Findings of the Association for Computational Linguistics: EMNLP 2023, 2023, pp. 4174-4186.", + "[91] B. Yang and J. Li, \"Visual elements mining as prompts for instruction learning for target-oriented multimodal sentiment classification,\" in Findings of the Association for Computational Linguistics: EMNLP 2023, 2023, pp. 6062-6075.", + "[92] J. Camacho-Collados, K. Rezaee, T. Riahi, A. Ushio, D. Loureiro, D. Antypas, J. Boisson, L. E. Anke, F. Liu, and E. 
Martinez-Camara, \"Tweetnlp: Cutting-edge natural language processing for social media,\" in Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, 2022, pp. 38-49.", + "[93] J. Ye, J. Zhou, J. Tian, R. Wang, Q. Zhang, T. Gui, and X.-J. Huang, \"Rethinkingtmsc: An empirical study for target-oriented multimodal sentiment classification,\" in Findings of the Association for Computational Linguistics: EMNLP 2023, 2023, pp. 270-277.", + "[94] M. Ivanova and S. French, The aesthetics of science: beauty, imagination and understanding. Routledge, 2020." + ], + "bbox": [ + 73, + 54, + 491, + 568 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/b2455a823d706a1fb297782d965eb4fc8120cd085b7a9a145704ebd539f3a434.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 75, + 595, + 197, + 688 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Luwei Xiao is currently pursuing his Ph.D. degree in the School of Computer Science and Technology at East China Normal University, Shanghai, China, under the supervision of Prof. Liang He. He is presently conducting an academic visit to the College of Computing and Data Science at Nanyang Technological University, Singapore, under the supervision of Prof. Erik Cambria, with funding support from the China Scholarship Council (CSC). 
His research interests encompass multimodal learning, semi-", + "bbox": [ + 207, + 580, + 491, + 707 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "ment analysis, and image aesthetic assessment.", + "bbox": [ + 73, + 707, + 359, + 719 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/590aa9d63be2eebe6cfa3158c9b043df2309dd5b1ad1248286a4339f6c939ac7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 78, + 767, + 194, + 883 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Rui Mao is a Research Scientist and Lead Investigator at Nanyang Technological University. He obtained his Ph.D. degree in Computing Science from the University of Aberdeen. His research interest lies in NLP, cognitive computing, and their applications in finance and cognitive science. He and his funded company (Ruimao Tech) have developed an end-to-end system (MetaPro) for computational metaphor processing and a neural search engine (wensousou.com) for searching Chinese ancient po", + "bbox": [ + 207, + 766, + 491, + 892 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "ems with modern language. He served as Area Chair in COLING and EMNLP and Associate Editor in IEEE Transactions on Affective Computing, Expert Systems, Information Fusion and Neurocomputing. Contact him at rui.mao@ntu.edu.sg.", + "bbox": [ + 73, + 892, + 491, + 939 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/c68014cf5101a6d4a08998285ee4b085ef6bfd6d34aee5cfdc345bad0334cc9f.jpg", + "image_caption": [ + "tacks." + ], + "image_footnote": [], + "bbox": [ + 506, + 63, + 625, + 171 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Shuai Zhao obtained his Ph.D. degree from Jinan University in 2024. He spent one year as a visiting student and six months as a research assistant at the School of Computer Science and Engineering, Nanyang Technological University. 
He is now a Postdoctoral Researcher at the College of Computing and Data Science, Nanyang Technological University. His current research interests include deep learning and natural language processing for code generation, summary generation, text classification and backdoor at", + "bbox": [ + 638, + 54, + 921, + 181 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/408bcb009bb136bc756c3feeeae37046041ca6f53f35e217e619ad31c595a06a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 238, + 624, + 354 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Qika Lin received his Ph.D. degree at Xi'an Jiaotong University. Currently, he is a Research Fellow at the National University of Singapore. His research interests include natural language processing, knowledge reasoning, and multimodal learning. He has published papers in top-tier journals/conferences, including TKDE, ACL, SIGIR, KDD, ICDE, and IJCAI. He has actively contributed to several journals/conferences as a reviewer or PC member, including TPAMI, IJCV, TKDE, TMC, TNNLS, NeurIPS, ICLR, SIGIR,", + "bbox": [ + 638, + 237, + 921, + 364 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "ACL, and EMNLP. He also served as a Guest Editor of IEEE TCSS and Information Fusion.", + "bbox": [ + 504, + 364, + 921, + 386 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/4781794a3325adb4297715bb796cb377152ae80fbf054d40217e4712f2292d98.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 434, + 627, + 549 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Yanhao Jia is a phd student at Nanyang Technological University. He obtained his bechealor degree in Computing Science from Shandong University. 
He has published over seven conference/journal papers on ECCV/NeurIPS/IEEE Trans on nuclear science and been the reviewer for ACM MM and ECCV.", + "bbox": [ + 638, + 431, + 921, + 513 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/055ce189bbd92010fabe2e02ed9ab7e4fe8376936fe577fb386be48061eb9c9e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 516, + 595, + 617, + 710 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Liang He received his PhD degree from the Department of Computer Science and Technology, East China Normal University, China. He is now a professor and the Vice Dean of the School of Computer Science and Technology, East China Normal University. His current research interest includes Natural Language Processing, Knowledge Processing, and Human in the Loop for Decision-making.", + "bbox": [ + 638, + 593, + 921, + 698 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/abc09142c4768e0fb2c6c4106a4e36deaa52ea49863b3bb59a2135f5bffcfe98.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 757, + 627, + 875 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Erik Cambria is a Professor at Nanyang Technological University, where he also holds the appointment of Provost Chair in Computer Science and Engineering, and Founder of several AI companies, such as SenticNet, offering B2B sentiment analysis services, and finaXai, providing fully explainable financial insights. His research focuses on neurosymbolic AI for interpretable, trustworthy, and explainable affective computing in domains like social media monitoring, financial forecasting, and AI for social", + "bbox": [ + 638, + 756, + 921, + 883 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "good. 
He is an IEEE Fellow, Associate Editor of various top-tier AI journals, e.g., Information Fusion and IEEE Transactions on Affective Computing, and is involved in several international conferences as keynote speaker, program chair and committee member. Contact him at cambria@ntu.edu.sg.", + "bbox": [ + 504, + 883, + 921, + 941 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025", + "bbox": [ + 73, + 32, + 517, + 44 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 906, + 32, + 921, + 42 + ], + "page_idx": 17 + } +] \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15848/c2a6d104-48f7-465b-bad3-e87ed3722daf_model.json b/data/2025/2504_15xxx/2504.15848/c2a6d104-48f7-465b-bad3-e87ed3722daf_model.json new file mode 100644 index 0000000000000000000000000000000000000000..33fc55721fdd034985801f680e67ce9cb524b601 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/c2a6d104-48f7-465b-bad3-e87ed3722daf_model.json @@ -0,0 +1,4251 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.519, + 0.044 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. 
XX, XXXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "1" + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.066, + 0.923, + 0.137 + ], + "angle": 0, + "content": "Exploring Cognitive and Aesthetic Causality for Multimodal Aspect-Based Sentiment Analysis" + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.151, + 0.802, + 0.187 + ], + "angle": 0, + "content": "Luwei Xiao, Student Member, IEEE, Rui Mao*, Member, IEEE, Shuai Zhao, Qika Lin, Yanhao Jia, Liang He, and Erik Cambria, Fellow, IEEE" + }, + { + "type": "text", + "bbox": [ + 0.105, + 0.209, + 0.89, + 0.394 + ], + "angle": 0, + "content": "Abstract—Multimodal aspect-based sentiment classification (MASC) is an emerging task due to an increase in user-generated multimodal content on social platforms, aimed at predicting sentiment polarity toward specific aspect targets (i.e., entities or attributes explicitly mentioned in text-image pairs). Despite extensive efforts and significant achievements in existing MASC, substantial gaps remain in understanding fine-grained visual content and the cognitive rationales derived from semantic content and impressions (cognitive interpretations of emotions evoked by image content). In this study, we present Chimera: a cognitive and aesthetic sentiment causality understanding framework to derive fine-grained holistic features of aspects and infer the fundamental drivers of sentiment expression from both semantic perspectives and affective-cognitive resonance (the synergistic effect between emotional responses and cognitive interpretations). Specifically, this framework first incorporates visual patch features for patch-word alignment. Meanwhile, it extracts coarse-grained visual features (e.g., overall image representation) and fine-grained visual regions (e.g., aspect-related regions) and translates them into corresponding textual descriptions (e.g., facial, aesthetic). 
Finally, we leverage the sentimental causes and impressions generated by a large language model (LLM) to enhance the model's awareness of sentimental cues evoked by semantic content and affective-cognitive resonance. Experimental results on standard MASC datasets demonstrate the effectiveness of the proposed model, which also exhibits greater flexibility to MASC compared to LLMs such as GPT-4o. We have publicly released the complete implementation and dataset at https://github.com/Xillv/Chimera" + }, + { + "type": "text", + "bbox": [ + 0.105, + 0.405, + 0.861, + 0.432 + ], + "angle": 0, + "content": "Index Terms—Multimodal aspect-based sentiment classification, Sentiment causality, Large language models, Affective-cognitive resonance." + }, + { + "type": "title", + "bbox": [ + 0.075, + 0.463, + 0.231, + 0.479 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.491, + 0.493, + 0.799 + ], + "angle": 0, + "content": "MULTIMODAL aspect-based sentiment classification (MASC) is a valuable task for analyzing user-generated multimodal content on social platforms, aiming to predict the sentiment polarity of a specific target/aspect term within a sentence, based on an image-text pair. In an era marked by growing global interconnectedness, social platforms have become essential channels for individuals to express opinions and share experiences [1]-[3]. These platforms support multimodal content, blending text and visual media, which better reflects how sentiment is conveyed [4]. Consequently, analyzing fine-grained sentiment expression in multimodal scenarios not only improves the depth of sentiment classification but also aligns with the natural manner in which users express opinions and emotions, ultimately supporting more accurate sentiment analysis for applications in finance [5], [6], social research [7], [8], and human-computer interaction [9], [10]. 
Current methodologies for MASC can be broadly divided into two principal categories: visual-text fusion-based approaches and translation-based approaches. Visual-text fusion-based methods address MASC by directly integrating visual content with" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.491, + 0.923, + 0.52 + ], + "angle": 0, + "content": "textual features through various attention-based mechanisms [11]-[16]." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.52, + 0.925, + 0.945 + ], + "angle": 0, + "content": "Yu et al. [11] were the first to propose the utilization of ResNet for image feature extraction in conjunction with BERT for language sequence modeling, subsequently feeding these components into a BERT encoder to facilitate the interactive modeling of cross-modal representations. Ling et al. [14] introduced a vision-language pre-training framework that leverages Faster R-CNN for extracting object-level visual features and BART for generating textual features, with the model pre-trained using three task-specific strategies targeting the language, vision, respectively. Yu et al. [13] presented a novel multi-task learning framework Image-Target Matching Network (ITM), which concurrently performs coarse-to-fine-grained visual-textual relevance detection and visual object-target alignment through cross-modal Transformers. Translation-based approaches focus on mapping visual content into the language space as auxiliary textual representations, leveraging this supplementary information, or integrating it with visual features to enhance MASC [17]-[22]. Khan et al. [17] translated the image into a corresponding caption, which is then jointly input with the sentence into BERT to predict the sentiment polarity associated with specific targets. Yang et al. 
[23] exploit a face-sensitive, translation-based approach that translates facial expressions in images into textual sentiment cues, which are then selectively aligned and fused with the targets for enhanced sentiment analysis. Xiao et al. [19] proposed the CoolNet framework, which generates visual captions for images and extracts syntactic and semantic features from the textual" + }, + { + "type": "page_footnote", + "bbox": [ + 0.073, + 0.821, + 0.493, + 0.856 + ], + "angle": 0, + "content": "Luwei Xiao, and Liang He are with the School of Computer Science and Technology, East China Normal University, Shanghai 200062, China. E-mail: louisshaw@stu.ecnu.edu.cn, lhe@cs.ecnu.edu.cn" + }, + { + "type": "page_footnote", + "bbox": [ + 0.073, + 0.856, + 0.493, + 0.902 + ], + "angle": 0, + "content": "- Rui Mao, Shuai Zhao, Yanhao Jia and Erik Cambria are with the College of Computing and Data Science, Nanyang Technological University, Singapore 639798. E-mail:{rui.mao, shuai.zhao, cambria}@ntu.edu.sg, yanhao002@e.ntu.edu.sg" + }, + { + "type": "page_footnote", + "bbox": [ + 0.074, + 0.902, + 0.493, + 0.926 + ], + "angle": 0, + "content": "Qika Lin is with the Saw Swee Hock School of Public Health, National University of Singapore 119077. E-mail: linqika@nus.edu.sg" + }, + { + "type": "page_footnote", + "bbox": [ + 0.076, + 0.929, + 0.26, + 0.943 + ], + "angle": 0, + "content": "* Corresponding author: Rui Mao" + }, + { + "type": "list", + "bbox": [ + 0.073, + 0.821, + 0.493, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.265, + 0.061, + 0.709 + ], + "angle": 270, + "content": "arXiv:2504.15848v1 [cs.CL] 22 Apr 2025" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.518, + 0.045 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. 
XX, XXXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "2" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.054, + 0.492, + 0.083 + ], + "angle": 0, + "content": "modality, subsequently fusing these with visual features through a cross-modal Transformer." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.098, + 0.493, + 0.388 + ], + "angle": 0, + "content": "Despite substantial efforts and promising advancements, current solutions continue to encounter the following challenges. First, excessive duplicative visual patches can overshadow critical visual clues relevant to the specific target, leading to considerable misalignment during patch-token interactions. These small visual patches often lack semantic coherence compared to complete visual regions, particularly when aligning targets with their corresponding objects in an image, potentially leading to ambiguous semantic representations. Second, limited studies have focused on the underlying rationale behind sentiment cues, particularly from the perspectives of semantic content and affective-cognitive resonance. Owing to the multimodal nature of Twitter content, which spans diverse facets of daily life, inferring the sentiment associated with specific targets necessitates not only an understanding of the surface-level information in text and images (e.g., facial expressions) but also an in-depth comprehension of the contextual background of particular events and the impressions evoked by the image's content and aesthetic attributes." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.389, + 0.493, + 0.752 + ], + "angle": 0, + "content": "To address the aforementioned challenges, this paper proposes Chimera: a cognitive and aesthetic sentiment causality understanding framework. This framework aims to incorporate and align fine-grained features of specific targets and reasons about semantic and impression rationales. 
However, two critical issues must be resolved to achieve these objectives: 1) How can specific targets in a sentence be aligned with their corresponding object-level fine-grained features in an image? 2) How can the model be enabled to reason about the emotional causal reasons within the semantic content of image-text pairs and the affective resonance evoked by image aesthetic attributes? For the first question, we propose to make the cross-modal alignment of the target via the visual patch-level by linguistic-aware patch-token alignment and object-level by accurately translating the object feature into language space. Regarding the second issue, while a recent study [24] developed a reasoning dataset for MASC, this dataset primarily explains the emotional causes within textual content and lacks reasoning capabilities for visual content and the affective resonance evoked by images, limiting its suitability for the multimodal nature of this task. Consequently, we employ a large language model (LLM), GPT-4o, to generate the semantic rationale and impression rationale to understand the causal foundations of emotions." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.753, + 0.493, + 0.943 + ], + "angle": 0, + "content": "Specifically, our proposed framework first extracts visual patch-level and textual features, feeding them into a tailored linguistic-aware patch-token alignment (LPA) module to achieve patch-token alignment. Concurrently, a translation module (TM) translates the holistic image or object-level content into aesthetic captions or facial descriptions, leveraging multimodal named entity annotations from the work of Wang et al. [25]. The TM-generated text, along with the sentence and aspect, is then input into a generative module for multi-task learning to produce sentiment polarity, semantic rationale (SR), and impression rationale (IR). 
By bootstrapping the model's perception of underlying rationale through an in-depth understanding of textual and" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.923, + 0.096 + ], + "angle": 0, + "content": "visual content as well as the affective resonance evoked by images, it enhances the performance of sentiment classification." + }, + { + "type": "text", + "bbox": [ + 0.528, + 0.097, + 0.911, + 0.112 + ], + "angle": 0, + "content": "In a nutshell, the primary contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.529, + 0.118, + 0.923, + 0.206 + ], + "angle": 0, + "content": "- We propose a novel framework for MASC that aligns specific targets with their corresponding visual objects at the patch-token and object levels while equipping the model with causal rationale reasoning ability for semantic rationale (SR), and impression rationale (IR)." + }, + { + "type": "text", + "bbox": [ + 0.529, + 0.207, + 0.923, + 0.336 + ], + "angle": 0, + "content": "- We approach this task by enabling the model to grasp the semantic content of image-text pairs and the affective resonance evoked by images. To our knowledge, we are the first to collect semantic and impression rationale data for the MASC task, based on existing MASC datasets, extending its content to incorporate semantic and impression rationale, offering a valuable resource for advancing MASC research." + }, + { + "type": "text", + "bbox": [ + 0.529, + 0.337, + 0.923, + 0.41 + ], + "angle": 0, + "content": "- Experiments on three widely-used Twitter benchmarks demonstrate that our proposed method outperforms previous approaches, achieving state-of-the-art performance. Further evaluations validate the effectiveness of our approach for MASC tasks." 
+ }, + { + "type": "list", + "bbox": [ + 0.529, + 0.118, + 0.923, + 0.41 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.416, + 0.925, + 0.562 + ], + "angle": 0, + "content": "The remainder of this paper is organized as follows: Section 2 provides an overview of related research on multimodal aspect-based sentiment classification, image aesthetic assessment, and multimodal learning. Section 3 details the proposed framework, including linguistics-aware patchtoken alignment, the translation-based module, causal rationale dataset construction, and LLM-based annotation generation. Main experimental results are presented in Section 4, and the in-depth analysis is shown in 5, followed by conclusions in Section 6." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.581, + 0.673, + 0.595 + ], + "angle": 0, + "content": "2 RELATED WORK" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.601, + 0.923, + 0.66 + ], + "angle": 0, + "content": "This section reviews key methods in multimodal aspect-based sentiment analysis and image aesthetic assessment. Additionally, as our novel rationale dataset is constructed using an LLM, we introduce LLMs for data annotation." + }, + { + "type": "title", + "bbox": [ + 0.504, + 0.676, + 0.884, + 0.692 + ], + "angle": 0, + "content": "2.1 Multimodal Aspect-based Sentiment Analysis" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.695, + 0.924, + 0.945 + ], + "angle": 0, + "content": "Sentiment analysis is a well-established research area focused on understanding and identifying human emotions and opinions across various contexts [26]–[31]. With the exponential growth of user-generated multimodal content (e.g., image-text pairs, video clips) on social platforms [32]–[35] has drawn substantial attention to Multimodal Aspect-based Sentiment Analysis (MABSA) [36]–[40]. The MABSA task consists of two sub-tasks: Multimodal Aspect Term Extraction (MATE) and our focused MASC task. 
MATE [41] is essentially a named entity recognition task aimed at identifying all relevant specific targets within the textual content of an image-text pair. MASC [42], [43] is a text classification task in which specific targets are provided, requiring the identification of their sentiment polarity (positive, neutral, or negative) based on the given image-text pair. A series of recent studies have successfully unified these two subtasks into a single framework, effectively" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.519, + 0.045 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "3" + }, + { + "type": "image", + "bbox": [ + 0.088, + 0.056, + 0.913, + 0.398 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.072, + 0.415, + 0.924, + 0.441 + ], + "angle": 0, + "content": "Fig. 1. The overall framework of the proposed Chimera. Chimera consists of four parts: Translation Module, Rationale Dataset Construction, Linguistic-aware Semantic Alignment, and Rationale-Aware Learning." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.464, + 0.492, + 0.742 + ], + "angle": 0, + "content": "streamlining the MABSA process [14], [15], [22], [44]–[47]. Among these studies, Yu et al. [12] proposed the Entity-Sensitive Attention and Fusion Network (ESAFN), which employs entity-oriented attention combined with a visual gate mechanism to model entity-sensitive inter-dynamics for MASC. Ju et al. [44] were the first to integrate MATE and MASC into a end-to-end task, developing a joint learning framework with cross-modal relation detection. Kruk et al. [35] proposed a multimodal framework for Instagram intent detection, integrating three taxonomies and the MDID dataset. 
It demonstrates that text-image fusion enhances accuracy by \\(9.6\\%\\) under semiotic divergence, emphasizing the necessity of multimodal models for capturing the non-intersective \"meaning multiplication\" inherent in social media. Yang et al. [15] improved cross-modal alignment modeling through a Transformer-based multi-task learning framework, incorporating text-guided cross-modal interactions and using adjective-noun pairs as supervision for a visual auxiliary task." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.753, + 0.493, + 0.943 + ], + "angle": 0, + "content": "Zhou et al. [16] developed an aspect-oriented multimodal fusion approach that constructs an informative dependency graph to minimize additional visual and textual noise in cross-modal interactions by selectively processing aspect-relevant textual and image features. Huang et al. [20] put forward to mapping images into scene graphs, using triplet semantic relationships among entities along with image captions to construct a relatedness matrix for achieving cross-modal alignment in MASC. More recently, Xiao et al. [22] introduced the Atlantis, a trident-shaped architecture that incorporates aesthetic attributes to enhance the emotional resonance of visual content. Fan et al. [24] devised a Flant5-based multi-task learning architecture to enhance the" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.464, + 0.923, + 0.508 + ], + "angle": 0, + "content": "model's reasoning capabilities for inferring underlying and direct causes of sentiment expressions. Additionally, they constructed a practical causal dataset for MASC." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.509, + 0.923, + 0.568 + ], + "angle": 0, + "content": "Our proposed method aims to achieve cross-modal alignment at the patch and object levels while equipping the model with reasoning capabilities to discern the semantic and impression rationale underlying sentiment expressions." 
+ }, + { + "type": "title", + "bbox": [ + 0.505, + 0.588, + 0.765, + 0.603 + ], + "angle": 0, + "content": "2.2 Image Aesthetic Assessment" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.607, + 0.924, + 0.945 + ], + "angle": 0, + "content": "Image aesthetics play a fundamental role in shaping viewers' emotional responses and overall aesthetic experience through complex psychological and cognitive processes [48]. Image aesthetics pertain to the subjective evaluation and appreciation of its beauty [49]. Image Aesthetic Assessment seeks to systematically appraise this aesthetic quality by analyzing the visual appeal of images [50]. Empirical psychological research corroborates that images can trigger a wide range of emotions, which are influenced by their aesthetic attributes and semantic content [51]. Previous research concentrated on aesthetic image captioning and analysis through the aggregation of commentary on aesthetic attributes [52]. These studies address the concepts of style, layout, and aesthetics from the viewpoints of beauty and visual attractiveness. Recent scholarly efforts have focused on encouraging vision-language models to generate visual connotations and captions related to various aesthetic attributes (e.g., color, harmony, lighting, composition) [53]. More recently, Kruk et al. [54] introduced a connotation-rich dataset, Impressions, designed to explore the emotions, thoughts, and beliefs that images evoke, along with the aesthetic elements that elicit these responses. The introduction of this dataset marks a significant advance in the study of" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.519, + 0.045 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. 
XX, XXXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "4" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.054, + 0.492, + 0.081 + ], + "angle": 0, + "content": "how visual stimuli can influence complex perceptual and emotional outcomes." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.083, + 0.493, + 0.172 + ], + "angle": 0, + "content": "In this study, we utilize aesthetic attributes to capture sentiment cues within visual content at both object and holistic levels. Inspired by Impressions [54], we further prompt the LLM to generate impression rationales for MASC, enabling analysis of the underlying affective resonance evoked by images." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.189, + 0.37, + 0.204 + ], + "angle": 0, + "content": "2.3 LLMs-Based Rationale Generation" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.208, + 0.493, + 0.616 + ], + "angle": 0, + "content": "Recently, LLMs have achieved significant success across various downstream tasks [55]–[58]. LLMs such as GPT-4o [59], Gemini [60], and LLaMA-2 [61] hold significant potential to usher data annotation into a new era, functioning not merely as auxiliary tools but as vital enhancers of its effectiveness and quality [62], [63]. LLMs can automatically annotate samples, ensure consistency across large data volumes, and adapt to specific domains via fine-tuning, thereby establishing a new standard in deep learning [64]–[66]. The rationale represents the detailed cognitive process an individual typically follows when solving a problem, providing useful supplementary information for the final answer [67]. Early studies [68] typically relied on human experts to annotate rationale in datasets, significantly limiting availability and scalability. A bunch of diverse methodologies have been developed to produce high-quality and fine-grained rationale. Wang et al. 
[69] proposed to elucidate each choice in a sample by generating choice-specific rationales via LLMs. Wang et al. [70] enhanced the credibility of generated rationales by incorporating gold-standard answers and using contrastive decoding algorithms. Liu et al. [71] laid much emphasis on curating high-quality prompts to obtain fine-grained rationales from GPT-4o and build a logical chain-of-thought instruction-tuning dataset. More recently, Kang et al. [72] developed a sophisticated neural reranking mechanism to dynamically retrieve highly relevant supplementary documents for generating high-quality rationales in knowledge-intensive reasoning tasks." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.616, + 0.493, + 0.706 + ], + "angle": 0, + "content": "In this paper, we build upon the work of Wang et al. [70] by fully utilizing the dataset's gold-standard annotations to generate semantic and impression rationales through meticulously designed prompts. This approach ensures high-quality rationale generation while avoiding additional costs from trial-and-error OpenAI API usage fees." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.725, + 0.236, + 0.739 + ], + "angle": 0, + "content": "3 METHODOLOGY" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.744, + 0.492, + 0.833 + ], + "angle": 0, + "content": "This section presents our proposed framework for MASC, beginning with the task formalization, followed by the rationale dataset construction process, and concluding with the proposed method, comprising linguistic-aware semantic alignment, a translation module, rationale dataset construction and a rationale-aware learning framework." 
+ }, + { + "type": "title", + "bbox": [ + 0.074, + 0.851, + 0.227, + 0.865 + ], + "angle": 0, + "content": "3.1 Task Definition" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.869, + 0.492, + 0.945 + ], + "angle": 0, + "content": "Given a multimodal dataset \\(M\\), each sample \\(X_{i}\\) consists of an image \\(V_{i}\\) paired with a sentence \\(S_{i}\\) containing one or more specific targets \\(T_{i}\\). The goal of MASC is to predict the sentiment polarity \\(Y_{i} \\in \\{\\text{Positive}, \\text{Negative}, \\text{Neutral}\\}\\) for a specific target \\(T_{i}\\). Moreover, our framework infers" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.054, + 0.925, + 0.158 + ], + "angle": 0, + "content": "both semantic rationale \\( SR_{i} \\) and impression rationale \\( IR_{i} \\), explaining the sentiment prediction \\( Y_{i} \\) for a specific target \\( T_{i} \\), based on multimodal semantic meaning and the affective resonance evoked by the image. In this study, the model outputs \\( SR_{i}, IR_{i}, Y_{i} \\) for an input sample \\( X_{i} = (S_{i}, V_{i}, T_{i}) \\), where \\( SR_{i} \\) and \\( IR_{i} \\) offer supplementary sentimental cues for sentiment prediction \\( T_{i} \\)." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.173, + 0.677, + 0.187 + ], + "angle": 0, + "content": "3.2 Method Overview" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.192, + 0.925, + 0.53 + ], + "angle": 0, + "content": "As shown in Figure 1, our proposed framework comprises four technical components, namely a Translation Module, Rationale Dataset Construction, Linguistic-aware Semantic Alignment, and Rationale-Aware Learning. The Translation Module converts visual content, both holistic and object-level, into language captions. For entire images, it generates emotion-laden aesthetic captions using our fine-tuned BLIP. For object-level content, it maps visuals to facial descriptions or aesthetic captions with rich emotional cues via EmoLA or our fine-tuned BLIP. 
The construction of the rationale dataset involves generating semantic and impression rationales. We curate prompts tailored to each rationale category and input them, along with the samples, into GPT-4o to collect the desired rationales. The Linguistic-aware Semantic Alignment module segments the input image into patches, dynamically selects and refines relevant visual patches, and achieves patch-token alignment guided by linguistic features from the input sentence. Lastly, we propose a Rationale-Aware Learning framework built up on a generative model that simultaneously learns sentiment classification, semantic rationale generation, and impression rationale generation from diverse textual inputs, such as sentences, aesthetic captions, and facial descriptions." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.545, + 0.69, + 0.559 + ], + "angle": 0, + "content": "3.3 Translation Module" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.564, + 0.925, + 0.812 + ], + "angle": 0, + "content": "This module translates visual content into overall aesthetic captions, object-level facial descriptions, or object-level aesthetic captions in textual form, embedding rich sentimental cues to facilitate object-level sentiment alignment. Specifically, we leverage object annotations from the Fine-Grained Multimodal Named Entity Recognition (MNER) task [25], which annotates specific targets in the sentence and their corresponding objects in the image. The MNER dataset is derived from the same Twitter dataset as the MASC datasets, incorporating the original image-text pairs from MASC. We further pre-process the MNER dataset and transfer its object annotations to the MASC dataset. To generate aesthetic captions rich in sentimental cues, we fine-tune a BLIP model using the recent aesthetic-specific dataset, Impression [54]. For facial description, we deploy the LLM-based EmoLA [73] to interpret fine-grained human mental states from images." 
+ }, + { + "type": "text", + "bbox": [ + 0.503, + 0.812, + 0.925, + 0.945 + ], + "angle": 0, + "content": "To tackle the challenge of potential one-to-many annotation scenarios, wherein multiple visual objects correspond to a specific target in the sentence, we calculate the similarity between the entire image and all object annotations, retaining only the object with the highest similarity score for each specific target. Subsequently, we generate various textual auxiliary sentences, based on object annotations. Firstly, in cases where the object corresponding to a specific target is absent from the image, a fine-tuned BLIP model is applied to" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.519, + 0.045 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "5" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.053, + 0.49, + 0.084 + ], + "angle": 0, + "content": "generate an overall aesthetic caption \\( A^{c} = \\left(a_{1}^{c}, a_{2}^{c}, \\ldots, a_{N_{c}}^{c}\\right) \\) for the entire image:" + }, + { + "type": "equation", + "bbox": [ + 0.215, + 0.09, + 0.49, + 0.108 + ], + "angle": 0, + "content": "\\[\nA ^ {c} = B L I P _ {\\text {f i n e}} (V), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.113, + 0.493, + 0.232 + ], + "angle": 0, + "content": "where \\( BLIP_{fine}(\\cdot) \\) is the fine-tuned BLIP over Impression dataset. If the object corresponding to a specific target is present in the image, we develop a Human-Object Differentiation (HOD) module based on the Sample and Computation Redistribution for Efficient Face Detection (SCRFD) [74] framework. 
This module determines the presence of a face within the annotated object-level visual content and assigns a facial binary label:" + }, + { + "type": "equation", + "bbox": [ + 0.212, + 0.237, + 0.49, + 0.256 + ], + "angle": 0, + "content": "\\[\nY _ {i} ^ {o _ {j}} = H O D \\left(V _ {i} ^ {o _ {j}}\\right), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.261, + 0.492, + 0.35 + ], + "angle": 0, + "content": "where \\( Y_{i}^{o_{j}} \\in \\{0, 1\\} \\) indicates whether the object-level visual content contains a face (0 for no face, 1 for face detected), and \\( V_{i}^{o_{j}} \\) denotes the \\( j \\)-th object-level visual content in the \\( i \\)-th image. Subsequently, we generate facial descriptions or aesthetic captions for object-level visual content based on the facial binary label:" + }, + { + "type": "equation", + "bbox": [ + 0.152, + 0.356, + 0.49, + 0.395 + ], + "angle": 0, + "content": "\\[\nA ^ {o} = \\left\\{ \\begin{array}{l l} E m o L A \\left(V _ {i} ^ {o _ {j}}\\right), & \\text {i f} Y _ {i} ^ {o _ {j}} = 1, \\\\ B L I P _ {\\text {f i n e}} \\left(V _ {i} ^ {o _ {j}}\\right), & \\text {o t h e r w i s e}, \\end{array} \\right. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.401, + 0.492, + 0.446 + ], + "angle": 0, + "content": "where \\( A^o = (a_1^o, a_2^o, \\ldots, a_{N_o}^o) \\) is the generated auxiliary sentence (facial description or aesthetic caption) for the object-level visual content." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.473, + 0.348, + 0.487 + ], + "angle": 0, + "content": "3.4 Rationale Dataset Construction" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.495, + 0.493, + 0.701 + ], + "angle": 0, + "content": "The current MASC benchmark includes only specific target (aspect) labels within the image-text pair sentences and their corresponding sentiment polarities. Recently, Fan et al. 
[24] introduced a dataset for MASC with cause analysis, focusing exclusively on textual semantics rather than integrating both visual and textual cues. Moreover, they overlook the affective resonance evoked by image aesthetic attributes, eliminating a crucial layer of emotional cues and resulting in an incomplete sentiment representation. This omission hinders the holistic integration of textual and visual modalities, leading to suboptimal sentiment modeling. Therefore, we employ GPT-4o to generate semantic and impression rationales, with the detailed generation process outlined in Algorithm 1." + }, + { + "type": "code_caption", + "bbox": [ + 0.074, + 0.715, + 0.386, + 0.73 + ], + "angle": 0, + "content": "Algorithm 1 Rationale Dataset Construction" + }, + { + "type": "algorithm", + "bbox": [ + 0.074, + 0.733, + 0.493, + 0.94 + ], + "angle": 0, + "content": "Input: All samples \\((V, S, T, Y)\\) in MASC dataset \\(M\\) \nOutput: Rationale dataset \\(R\\) which contains Semantic Rationale (SR) and Impression Rationale (IR) \n1: Design & refine prompt pool for SR (SRP) and IR (IRP) \n2: for each sample \\((V_i, S_i, T_i, Y_i)\\) in \\(M\\) do \n3: //Randomly select a prompt from SRP for SR \n4: \\(SR_{prompt} \\gets PromptPoolforSR(V_i, S_i, T_i, Y_i)\\) \n5: //Randomly select a prompt from IRP for IR \n6: \\(IR_{prompt} \\gets PromptPoolforIR(V_i, S_i, T_i, Y_i)\\) \n7: Produce SR and IR via GPT-4o \n8: \\(SR_i \\gets GPT-4o(V_i, S_i, T_i, Y_i, SR_{prompt})\\) \n9: \\(IR_i \\gets GPT-4o(V_i, S_i, T_i, Y_i, IR_{prompt})\\) \n10: Add \\((V_i, S_i, T_i, Y_i, SR_i, IR_i)\\) to \\(R\\) \n11: end for" + }, + { + "type": "table_caption", + "bbox": [ + 0.563, + 0.055, + 0.866, + 0.08 + ], + "angle": 0, + "content": "TABLE1 Example prompts for semantic rationale generation." + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.092, + 0.912, + 0.396 + ], + "angle": 0, + "content": "
TypePrompts
System PromptYou are an AI assistant specializing in multimodal understanding and sentiment analysis, particularly in scenarios involving the integration of image and text modalities.
Semantic Rationale Generation PromptYou will be provided with an image-text pair. Your task is to analyze the sentiment towards the specified entity {aspect} and explain why the sentiment polarity {label} is appropriate.\nYour explanation should consider both the semantic meaning of the text and the visual representation of the image, focusing on explicit content and the emotional or contextual cues conveyed by their combination.\nStart your response with: "Based on the image-text pair, the sentiment towards {aspect} is {label} because...". Provide a concise, focused explanation highlighting the single most compelling reason for this sentiment classification.
" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.417, + 0.924, + 0.52 + ], + "angle": 0, + "content": "To comprehensively capture the emotional rationale underlying the identified sentiment polarity from a semantic perspective of both image and text, we employ GPT-4o (gpt-4o-2024-05-13) via the OpenAI \\(\\mathrm{API}^1\\) to generate SR. Meanwhile, to enable the model to effectively capture implicit emotional cues arising from the affective resonance of aesthetic attributes, we employ GPT-4o to generate the IR." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.52, + 0.923, + 0.681 + ], + "angle": 0, + "content": "To enhance the diversity of generated semantic and impression rationales (SR and IR), we designed and refined a series of templates to construct separate prompt pools for SR and IR, from which a prompt is randomly selected as instructions to guide GPT-4o in generating the corresponding rationale. In this study, we adopt the approach outlined by Sarah et al. [75] and Wang et al. [70], leveraging tailored prompts conditioned on the dataset's gold-standard annotations to generate SR and IR using GPT-4o. The example prompts for generating SR and IR are presented in Tables 1 and 2, respectively." + }, + { + "type": "title", + "bbox": [ + 0.504, + 0.696, + 0.866, + 0.711 + ], + "angle": 0, + "content": "3.5 Linguistic-aware Semantic Alignment(LSA)" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.714, + 0.923, + 0.773 + ], + "angle": 0, + "content": "We first introduce dynamic patch selection in Sec. 3.5.1. Then, we introduce the semantic patch calibration in Sec. 3.5.2 and patch-token alignment in Sec. 3.5.3. The overall process of LSA is shown in Algorithm 2." 
+ }, + { + "type": "title", + "bbox": [ + 0.505, + 0.784, + 0.776, + 0.8 + ], + "angle": 0, + "content": "3.5.1 Dynamic Patch Selection(DPS)" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.801, + 0.922, + 0.889 + ], + "angle": 0, + "content": "Dynamic Patch Selection (DPS) is considered a discriminative task that assigns significance scores to visual patches and selects valuable patches based on high scores. For the image in an image-text pair, we opt for vision Transformers as the visual encoder. The image \\( V \\) is divided into \\( N_v \\) non-overlapping patches by spatial distribution." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.889, + 0.923, + 0.918 + ], + "angle": 0, + "content": "These patches are then input as a visual token sequence into the vision Transformer to obtain a set of visual" + }, + { + "type": "footer", + "bbox": [ + 0.518, + 0.929, + 0.706, + 0.943 + ], + "angle": 0, + "content": "1. https://platform.openai.com" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.519, + 0.045 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "6" + }, + { + "type": "table_caption", + "bbox": [ + 0.127, + 0.055, + 0.439, + 0.08 + ], + "angle": 0, + "content": "TABLE2 Example prompts for impression rationale generation." + }, + { + "type": "table", + "bbox": [ + 0.088, + 0.092, + 0.481, + 0.367 + ], + "angle": 0, + "content": "
TypePrompts
System PromptYou are an AI assistant specializing in multimodal emotion and aesthetic understanding, especially in analyzing the emotional responses elicited by visual content.
Impression Rationale Generation PromptYou will be given an image-text pair. Your task is to analyze the specified entity {aspect} and its associated sentiment label {label} based entirely on the image's aesthetic attributes and the emotional resonance it conveys.Focus exclusively on the overall impression and visual connotations conveyed by the image, emphasizing why the assigned sentiment {label} aligns with the general mood or perception evoked by the entity. Avoid mentioning specific details; instead, high-light the prevailing emotional or aesthetic impression.
" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.409, + 0.491, + 0.528 + ], + "angle": 0, + "content": "patch features \\( V = (v_{cls}, v_1, v_2, \\ldots, v_{N_v}) \\in \\mathbb{R}^{(N_v + 1) \\times d} \\). For sentence \\( S \\), a pre-trained Transformer serves as the textual encoder. The sentence is tokenized into \\( N_s \\) tokens and processed by the encoder to extract linguistic features \\( S = (s_1, s_2, \\ldots, s_{N_s}) \\in \\mathbb{R}^{N_s \\times d} \\). Subsequently, we incorporate spatial information from images into visual patch features and use an MLP-based score-sensitive prediction mechanism to learn significant scores:" + }, + { + "type": "equation", + "bbox": [ + 0.122, + 0.535, + 0.49, + 0.552 + ], + "angle": 0, + "content": "\\[\np _ {i} ^ {s} = \\operatorname {S i g m o i d} \\left(\\mathbf {M L P} \\left(\\boldsymbol {v} _ {i}\\right)\\right), i \\in \\{1, 2, \\dots , N _ {v} \\}, \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.558, + 0.49, + 0.72 + ], + "angle": 0, + "content": "where \\( p_i^s \\in [0,1] \\) represents the importance score assigned to each visual patch. Moreover, achieving refined cross-modal alignment requires more than depending solely on a scoring mechanism to identify valuable visual patches without linguistic supervision [76], [77]. Consequently, we introduce linguistic context by calculating attentive scores between visual patches and the input sentence. First, we derive linguistic-aware scores \\( p_i^l \\) through cross-attention between visual patches and linguistic features. 
Then, we enhance key visual content by computing self-attention within patches, producing image-prominent scores \\( p_i^e \\):" + }, + { + "type": "equation", + "bbox": [ + 0.12, + 0.725, + 0.49, + 0.745 + ], + "angle": 0, + "content": "\\[\np _ {i} ^ {l} = \\operatorname {N o r m} \\left(\\boldsymbol {v} _ {i} \\cdot S / d\\right), p _ {i} ^ {e} = \\operatorname {N o r m} \\left(\\boldsymbol {v} _ {i} \\cdot V / d\\right), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.75, + 0.491, + 0.809 + ], + "angle": 0, + "content": "where \\( \\text{Norm}(\\cdot) \\) denotes the normalization of scores to a range from 0 to 1. \\( S \\) and \\( V \\) represent the global embeddings for linguistic features and visual patches, respectively. These scores are integrated to derive the final value score:" + }, + { + "type": "equation", + "bbox": [ + 0.171, + 0.815, + 0.49, + 0.844 + ], + "angle": 0, + "content": "\\[\np _ {i} ^ {f} = (1 - \\beta) p _ {i} ^ {s} + \\frac {\\beta}{2} \\left(p _ {i} ^ {l} + p _ {i} ^ {e}\\right), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.849, + 0.492, + 0.945 + ], + "angle": 0, + "content": "where \\(\\beta\\) refers to the weight parameter. After obtaining the value score \\(p^f = (p_1^f, p_2^f, p_3^f, \\ldots, p_{N_v}^f) \\in \\mathbb{R}^{N_v}\\), we convert it into a binary decision matrix \\(\\{0, 1\\}^{N_v}\\) to determine patch selection. This matrix is constructed using the Gumbel-Softmax technique [78], ensuring a smooth and differentiable sampling process. 
The Gumbel-Softmax matrix" + }, + { + "type": "code_caption", + "bbox": [ + 0.507, + 0.051, + 0.91, + 0.067 + ], + "angle": 0, + "content": "Algorithm 2 Linguistic-aware Semantic Alignment (LSA)" + }, + { + "type": "algorithm", + "bbox": [ + 0.511, + 0.069, + 0.923, + 0.425 + ], + "angle": 0, + "content": "1: procedure DYNAMIC PATCH SELECTION(V, S) \n2: Extract visual patches \\(V \\leftarrow \\mathrm{ViT}(V)\\), text tokens \\(S \\leftarrow\\) TextEnc(S) \n3: Compute significance scores: \\(p_i^s \\leftarrow \\mathrm{MLP}(v_i)\\), \\(p_i^l \\leftarrow \\mathrm{Norm}(v_i S^\\top)\\), \\(p_i^e \\leftarrow \\mathrm{Norm}(v_i V^\\top)\\) \n4: Fuse scores: \\(p_i^f \\leftarrow (1 - \\beta)p_i^s + \\frac{\\beta}{2}(p_i^l + p_i^e)\\) \n5: Apply Gumbel-Softmax sampling to obtain binary mask \\(D \\in \\{0, 1\\}^{N_v}\\) \n6: Return selected patches \\(V^p \\leftarrow \\{v_i | D_i = 1\\}\\) \n7: end procedure \n8: procedure SEMANTIC PATCH CALIBRATION(\\(V^p\\)) \n9: Aggregate key patches: \\(\\tilde{V}^p \\leftarrow \\mathrm{Softmax}(\\mathrm{MLP}(V^p)) \\cdot V^p \\quad \\triangleright\\) Adaptive weighting \n10: Fuse redundant patches: \\(\\tilde{v}^r \\leftarrow \\sum \\tilde{p}_i v_i \\quad \\triangleright\\) Weighted sum via \\(p^f\\) \n11: Return \\(\\tilde{V}^p \\leftarrow [v_{cls}; \\tilde{V}^p; \\tilde{v}^r]\\) \n12: end procedure \n13: procedure PATCH-TOKEN ALIGNMENT(\\(\\tilde{V}^p, S\\)) \n14: Compute cosine similarity matrix \\(A \\in \\mathbb{R}^{(N_f + 2) \\times N_s}\\) \n15: Calculate alignment score \\(K(V, S) \\leftarrow \\frac{1}{2} (\\text{mean}(\\text{max}_j A_{ij}) + \\text{mean}(\\text{max}_i A_{ij}))\\) \n16: Optimize with \\(\\mathcal{L}_{\\text{align}} \\leftarrow \\text{Bi-directional Triplet Loss}(K(V, S), K(V, \\hat{S}), K(\\hat{V}, S))\\) \n17: end procedure" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.452, + 0.602, + 0.465 + ], + "angle": 0, + "content": "is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.57, + 0.47, + 
0.922, + 0.507 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {M} _ {i, l} = \\frac {\\exp \\left(\\log \\left(\\boldsymbol {m} _ {i , l} + G _ {i , l}\\right) / \\tau\\right)}{\\sum_ {j = 1} ^ {L} \\exp \\left(\\log \\left(\\boldsymbol {m} _ {i , j} + G _ {i , j}\\right) / \\tau\\right)}, \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.512, + 0.922, + 0.587 + ], + "angle": 0, + "content": "where \\(M \\in \\mathbb{R}^{N_v \\times L}\\), \\(L\\) indicates the total number of categories. In this scenario, \\(L\\) is set to 2 for the binary decision \\((\\pmb{m}_{i,1} = p_i^f, \\pmb{m}_{i,2} = 1 - p_i^f)\\). \\(G_i = -\\log (-\\log (U_i))\\) represents the Gumbel distribution, \\(U_i\\) refers to the uniform distribution and \\(\\tau\\) is the temperature parameter." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.587, + 0.922, + 0.618 + ], + "angle": 0, + "content": "Next, we obtain the differentiable decision matrix \\(D\\) by applying the arg-max on \\(M\\):" + }, + { + "type": "equation", + "bbox": [ + 0.593, + 0.623, + 0.922, + 0.641 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {D} = \\operatorname {S a m p l i n g} (\\boldsymbol {M}) _ {*, 1} \\in \\{0, 1 \\} ^ {N _ {v}}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.647, + 0.922, + 0.735 + ], + "angle": 0, + "content": "where \\( D \\) indicates patch selection outcomes: \"1\" for important patches and \"0\" for redundant ones. In the training stage, gradients are backpropagated through the differentiable decision matrix, enabling the dynamic selection of valuable visual patches via the score-sensitive prediction mechanism." 
+ }, + { + "type": "title", + "bbox": [ + 0.505, + 0.748, + 0.791, + 0.764 + ], + "angle": 0, + "content": "3.5.2 Semantic Patch Calibration(SPC)" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.766, + 0.922, + 0.902 + ], + "angle": 0, + "content": "This section aims to further refine the semantic representation of the selected valuable visual patches. After dynamically selecting important visual patches guided by linguistic supervision, we designate them as \\( V^{p} = \\left(v_{1}^{p}, v_{2}^{p}, \\ldots, v_{N_{p}}^{p}\\right) \\in \\mathbb{R}^{N_{p} \\times d} \\). \\( N_{p} \\) is the number of selected valuable visual patches. We employ an aggregation network [79] to model multiple aggregation weights and combine the selected \\( N_{p} \\) visual patches to generate \\( N_{f} \\) informative visual features:" + }, + { + "type": "equation", + "bbox": [ + 0.576, + 0.906, + 0.922, + 0.947 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\boldsymbol {v}} _ {j} ^ {p} = \\sum_ {i = 1} ^ {N _ {p}} (\\boldsymbol {W}) _ {i j} \\cdot \\boldsymbol {v} _ {i} ^ {p}, \\quad j = [ 1, \\dots , N _ {f} ], \\tag {9}\n\\]" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.519, + 0.045 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "7" + }, + { + "type": "equation", + "bbox": [ + 0.183, + 0.053, + 0.49, + 0.071 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {W} = \\operatorname {s o f t m a x} \\left(\\mathbf {M L P} \\left(\\boldsymbol {V} ^ {p}\\right)\\right), \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.077, + 0.491, + 0.194 + ], + "angle": 0, + "content": "where \\((\\mathbf{W})\\) denotes the normalized weight matrix and \\(\\sum_{i=1}^{N_s} (\\mathbf{W})_{ij} = 1\\). \\(N_f\\) is the number of aggregated patches \\((N_f < N_p)\\). 
The aggregation network adaptively combines visually similar patches and is differentiable for end-to-end training. While redundant visual patches can be discarded, they may contain supplementary semantic features for refined cross-modal alignment. Therefore, we fuse them into a single patch:" + }, + { + "type": "equation", + "bbox": [ + 0.128, + 0.201, + 0.491, + 0.25 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\boldsymbol {v}} ^ {r} = \\sum_ {i \\in \\mathcal {N}} \\tilde {p} _ {i} \\cdot \\boldsymbol {v} _ {i}, \\quad \\tilde {p} _ {i} = \\frac {\\exp \\left(p _ {i} ^ {f}\\right) \\boldsymbol {D} _ {i}}{\\sum_ {i = 1} ^ {N} \\exp \\left(p _ {i} ^ {f}\\right) \\boldsymbol {D} _ {i}}, \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.255, + 0.492, + 0.323 + ], + "angle": 0, + "content": "where \\(\\mathcal{N}\\) represents the set for redundant visual patches. \\(\\tilde{p}_i\\) denotes the normalized score of the value score \\(p_i^f\\). Finally, this component models the calibrated refined visual patches, denoted as \\(\\tilde{V}^p = (v_{cls},\\tilde{v}_1^p,\\tilde{v}_2^p,\\dots ,\\tilde{v}_{N_f}^p,\\tilde{v}^r)\\in \\mathbb{R}^{(N_f + 2)\\times d}\\)." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.332, + 0.325, + 0.348 + ], + "angle": 0, + "content": "3.5.3 Patch-token Alignment(PTA)" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.35, + 0.491, + 0.53 + ], + "angle": 0, + "content": "This module aims to achieve the fine-grained patch-token level alignment. Specifically, we first utilize the refined visual patches \\(\\tilde{V}^p\\) and linguistic features \\(S\\) to compute tokenwise similarities, producing a patch-token similarity matrix \\(A\\in \\mathbb{R}^{(N_f + 2)\\times N_s}\\). \\((A)_{ij} = \\frac{(\\tilde{v}_i)^T s_j}{\\|\\tilde{v}_i\\| \\|s_j\\|}\\) denotes the patch-token level alignment score between the \\(i\\)-th visual patch and the \\(j\\)-th word. 
Subsequently, maximum-correspondence interaction is introduced to aggregate cross-modal alignment. For each visual patch (or token), we identify the most aligned textual token (or patch) and calculate the average alignment score \\(K(V,S)\\), representing the overall alignment between the image \\(V\\) and the sentence \\(S\\):" + }, + { + "type": "equation", + "bbox": [ + 0.09, + 0.537, + 0.49, + 0.59 + ], + "angle": 0, + "content": "\\[\nK (V, S) = \\frac {1}{N _ {f} + 2} \\sum_ {i = 1} ^ {N _ {f} + 2} \\max _ {j} (\\boldsymbol {A}) _ {i j} + \\frac {1}{N _ {s}} \\sum_ {j = 1} ^ {N _ {s}} \\max _ {i} (\\boldsymbol {A}) _ {i j} \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.591, + 0.49, + 0.621 + ], + "angle": 0, + "content": "Following a previous method [80], the bi-direction triplet loss with hard negative mining is exploited:" + }, + { + "type": "equation", + "bbox": [ + 0.143, + 0.627, + 0.49, + 0.68 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {\\text {a l i g n}} = \\sum_ {(V, S)} [ \\gamma - K (V, S) + K (V, \\hat {S}) ] _ {+} \\tag {13} \\\\ + [ \\gamma - K (V, S) + K (\\hat {V}, S) ] _ {+}, \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.686, + 0.491, + 0.761 + ], + "angle": 0, + "content": "where \\(\\gamma\\) is the trade-off parameter. \\([x]_{+} = \\max (x,0)\\) and \\((V,S)\\) refers to a positive image-text pair in the mini-batch. Moreover, \\(\\hat{S} = \\operatorname{argmax}_{j\\neq S}K(V,j)\\) and \\(\\hat{V} = \\operatorname{argmax}_{i\\neq V}K(i,V)\\) indicate the hardest negative sentence and visual examples within a mini-batch, respectively." 
+ }, + { + "type": "title", + "bbox": [ + 0.073, + 0.778, + 0.307, + 0.794 + ], + "angle": 0, + "content": "3.6 Rationale-aware Learning" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.797, + 0.491, + 0.943 + ], + "angle": 0, + "content": "To endow the model with the ability to perform semantic causality and impression reasoning, we propose a rationale-aware learning framework designed to fine-tune a sequence-to-sequence (seq2seq) model. This seq2seq model is proposed to achieve three task objectives for each specific target within the image-text pair: sentiment classification (SC), semantic rationale generation (SRG), and impression rationale generation (IRG). These tasks are differentiated by the use of distinct input configurations and input content. For SC, the decoder outputs only the predicted sentiment" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.921, + 0.083 + ], + "angle": 0, + "content": "polarity. In SRG and IRG, the decoder produces the corresponding rationale and the sentiment prediction." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.083, + 0.922, + 0.228 + ], + "angle": 0, + "content": "Specifically, our input comprises the textual sentence \\( S = (s_{1}, s_{2}, \\ldots, s_{N_{s}}) \\), the overall aesthetic caption of the image \\( A^{c} = (a_{1}^{c}, a_{2}^{c}, \\ldots, a_{N_{c}}^{c}) \\), the object-level description \\( A^{o} = (a_{1}^{o}, a_{2}^{o}, \\ldots, a_{N_{o}}^{o}) \\), which pertains to either facial or aesthetic attributes and the specific target \\( T \\). The input format is determined by the presence of the specific target within the visual content. For example, if the specific target is identified in the image, based on the annotations provided by Wang et al. 
[25], the input for SC, SRG, and IRG is defined as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.605, + 0.236, + 0.922, + 0.253 + ], + "angle": 0, + "content": "\\[\nH ^ {\\mathrm {s c}} = \\operatorname {e n c o d e r} \\left(t _ {\\langle \\mathrm {s c} \\rangle}, A ^ {c}, S, T\\right), \\tag {14}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.601, + 0.259, + 0.921, + 0.276 + ], + "angle": 0, + "content": "\\[\nH ^ {\\mathrm {s r g}} = \\operatorname {e n c o d e r} \\left(t _ {\\langle \\mathrm {s r g} \\rangle}, A ^ {c}, S, T\\right), \\tag {15}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.601, + 0.283, + 0.921, + 0.3 + ], + "angle": 0, + "content": "\\[\nH ^ {\text {i r g}} = \\operatorname {e n c o d e r} \\left(t _ {\\langle \\mathrm {i r g} \\rangle}, A ^ {c}, S, T\\right), \\tag {16}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.307, + 0.922, + 0.468 + ], + "angle": 0, + "content": "where encoder \\((\\cdot)\\) is the Transformer encoder of the seq2seq model. The tokens \\(t_{\\langle \\mathrm{sc}\\rangle}, t_{\\langle \\mathrm{srg}\\rangle},\\) and \\(t_{\\langle \\mathrm{irg}\\rangle}\\) are specialized tokens designed to represent distinct tasks. Although the specific aspects are not present in the image, this does not imply that sentimental cues from the image have no impact on predicting the sentiment polarity. On the contrary, incorporating sentiment cues from the holistic image can provide valuable insights into the influence of image aesthetic attributes on the sentiment prediction for the specific aspect. 
For samples where specific targets are present in the visual content, the input format is structured as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.605, + 0.475, + 0.921, + 0.492 + ], + "angle": 0, + "content": "\\[\nH ^ {\\mathrm {s c}} = \\operatorname {e n c o d e r} \\left(t _ {\\langle \\mathrm {s c} \\rangle}, S, A ^ {o}, T\\right), \\tag {17}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.6, + 0.498, + 0.921, + 0.515 + ], + "angle": 0, + "content": "\\[\nH ^ {\\mathrm {s r g}} = \\operatorname {e n c o d e r} \\left(t _ {\\langle \\mathrm {s r g} \\rangle}, S, A ^ {o}, T\\right), \\tag {18}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.601, + 0.522, + 0.921, + 0.539 + ], + "angle": 0, + "content": "\\[\nH ^ {\text {i r g}} = \\operatorname {e n c o d e r} \\left(t _ {\\langle \\text {i r g} \\rangle}, S, A ^ {o}, T\\right). \\tag {19}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.545, + 0.922, + 0.72 + ], + "angle": 0, + "content": "We employ fine-grained, object-level emotion-laden descriptions to establish alignment between specific targets and their corresponding objects in the image, which enhances both the accuracy and interpretability of the sentiment prediction process. Subsequently, these hidden features are passed through a stack of self-attention-based encoders, which dynamically fuse representations and model both intra-modal and cross-modal interactions. Finally, the decoder produces task-specific outputs. 
For Sentiment Classification (SC), the decoder generates the predicted sentiment polarity, selecting from \"positive,\" \"negative,\" or \"neutral,\" denoted as \\(\hat{y}_{sc}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.629, + 0.727, + 0.921, + 0.745 + ], + "angle": 0, + "content": "\\[\nG ^ {\\mathrm {s c}} = \\left[ \\langle \\mathrm {s e n} \\rangle \\hat {y} ^ {\\mathrm {s c}} \\langle / \\mathrm {s e n} \\rangle \\right], \\tag {20}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.751, + 0.922, + 0.84 + ], + "angle": 0, + "content": "where the special tokens \\(\\langle \\mathrm{sen}\\rangle\\) and \\(\\langle / \\mathrm{sen}\\rangle\\) are denoted as the start and end markers for SC predictors. For the two additional rationale generation tasks SRG and IRG, the decoder generates not only the semantic rationale \\(\\hat{s}r\\) and impression rationale \\(\\hat{i}r\\) for the specific target but also their corresponding sentiment predictions \\(\\hat{y}_{sr}\\) and \\(\\hat{y}_{ir}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.592, + 0.847, + 0.921, + 0.865 + ], + "angle": 0, + "content": "\\[\nG ^ {\\mathrm {s r}} = \\left[ \\langle \\mathrm {s r} \\rangle \\hat {s} r \\langle / \\mathrm {s r} \\rangle \\langle \\mathrm {s e n} \\rangle \\hat {y} ^ {\\mathrm {s r}} \\langle / \\mathrm {s e n} \\rangle \\right], \\tag {21}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.594, + 0.868, + 0.921, + 0.893 + ], + "angle": 0, + "content": "\\[\nG ^ {\\mathrm {i r}} = \\left[ \\langle \\mathrm {i r} \\rangle \\hat {i r} \\langle / \\mathrm {i r} \\rangle \\langle \\mathrm {s e n} \\rangle \\hat {y} ^ {\\mathrm {i r}} \\langle / \\mathrm {s e n} \\rangle \\right], \\tag {22}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.898, + 0.922, + 0.943 + ], + "angle": 0, + "content": "where \\(\\langle \\mathrm{sr}\\rangle\\), \\(\\langle / \\mathrm{sr}\\rangle\\), \\(\\langle \\mathrm{ir}\\rangle\\), \\(\\langle / \\mathrm{ir}\\rangle\\), 
\\(\\langle \\mathrm{sen}\\rangle\\), and \\(\\langle / \\mathrm{sen}\\rangle\\) serve as specialized markers to delineate the rationale and sentiment polarity. Finally, the input sequence is uniformly denoted" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.519, + 0.045 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "8" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.054, + 0.493, + 0.099 + ], + "angle": 0, + "content": "as \\( X \\), and the generated textual content is represented as \\( Z = \\{z_{1}, z_{2}, \\ldots, z_{N_{z}}\\} \\). Consequently, the loss function for the generation process is formulated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.128, + 0.105, + 0.49, + 0.145 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {Z} = - \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\sum_ {n _ {z} = 1} ^ {N _ {z}} \\log P \\left(z _ {i, n _ {z}} \\mid \\hat {z} _ {i, < n _ {z}}, X\\right), \\tag {23}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.151, + 0.492, + 0.283 + ], + "angle": 0, + "content": "where \\(z_{i,n_z}\\) is the ground truth token at position \\(n_z\\) for sample \\(i\\), \\(\\hat{z}_{i, < n_z}\\) represents the generated sequence up to position \\(n_z - 1\\) for sample \\(i\\), and \\(P(z_{i,n_z} \\mid \\hat{z}_{i, < n_z}, X)\\) denotes the probability of generating token \\(z_{i,n_z}\\) conditioned on \\(\\hat{z}_{i, < n_z}\\) and \\(X\\). In this rationale-aware learning framework, since all objectives are formulated as generative tasks, the loss functions \\(\\mathcal{L}_{SC}\\), \\(\\mathcal{L}_{SRG}\\), and \\(\\mathcal{L}_{IRG}\\) are all employ the generative loss function, E.q. 23. 
Therefore, the objective function in the proposed method is formulated as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.093, + 0.288, + 0.49, + 0.318 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} = \\alpha \\mathcal {L} _ {\\mathrm {S C}} + \\frac {1 - \\alpha}{2} \\mathcal {L} _ {\\mathrm {S R G}} + \\frac {1 - \\alpha}{2} \\mathcal {L} _ {\\mathrm {I R G}} + \\lambda \\mathcal {L} _ {\\text {a l i g n}}, \\tag {24}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.322, + 0.492, + 0.368 + ], + "angle": 0, + "content": "where \\(\\alpha, \\lambda \\in (0,1)\\) are tradeoff hyperparameters that regulate the relative contributions of each generative loss and the patch-token alignment." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.39, + 0.248, + 0.405 + ], + "angle": 0, + "content": "4 EXPERIMENTS" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.411, + 0.492, + 0.501 + ], + "angle": 0, + "content": "In this section, we provide a comprehensive description of the experimental settings and evaluate the proposed method on three publicly available MASC datasets, benchmarking it against state-of-the-art methods. Furthermore, we perform an extensive series of studies to thoroughly analyze the effectiveness of the proposed approach." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.521, + 0.28, + 0.537 + ], + "angle": 0, + "content": "4.1 Experimental Settings" + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.54, + 0.192, + 0.555 + ], + "angle": 0, + "content": "4.1.1 Datasets" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.559, + 0.493, + 0.75 + ], + "angle": 0, + "content": "We utilize three widely recognized benchmark datasets for MASC [11], [81]: Twitter-2015, Twitter-2017, and the Political Twitter dataset. Each sample within these datasets comprises a user-generated multimodal image-text pair, including an image, a textual sentence, and one or more specific targets. 
Each aspect is annotated with a sentiment label from the set Positive, Negative, Neutral. The detailed statistics of these datasets are presented in Table 3. Furthermore, we incorporate semantic rationale (SR), impression rationale (IR), aesthetic captions for the entire image (AC), facial descriptions (FD), and aesthetic captions for objects (AO) for each data point. The maximum length for facial descriptions and aesthetic captions is constrained to 50 tokens." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.764, + 0.289, + 0.779 + ], + "angle": 0, + "content": "4.1.2 Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.782, + 0.493, + 0.945 + ], + "angle": 0, + "content": "We adopt the seq2seq model Flan-T5 [82] as the backbone of our generative framework. Specifically, the model is trained for 10 epochs using the AdamW optimizer [83], with a batch size of 4. A grid search is performed on the development set to determine the optimal learning rate, \\(\\alpha\\) and \\(\\lambda\\) for Flan-T5 across the three datasets. The selected values for learning rate are \\(3e - 4\\), \\(3e - 4\\), \\(1e - 4\\), respectively, for the Twitter-2015, Twitter-2017 and Political Twitter. The trade-off hyperparameter sets \\((\\alpha\\) and \\(\\lambda)\\) are 0.2, 0.1, 0.2 and 0.2, 0.5, 0.5, respectively, for the Twitter-2015, Twitter-2017 and Political Twitter. Consistent with prior research on MASC [11], [24]," + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.054, + 0.924, + 0.113 + ], + "angle": 0, + "content": "we employ Accuracy (Acc) and F1 score (F1) as the evaluation metrics. The model is implemented using PyTorch, and experiments are conducted on an NVIDIA V100 GPU with 30 GB of memory." 
+ }, + { + "type": "title", + "bbox": [ + 0.505, + 0.13, + 0.704, + 0.145 + ], + "angle": 0, + "content": "4.2 Compared Baselines" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.149, + 0.924, + 0.209 + ], + "angle": 0, + "content": "We conducted a comprehensive comparative evaluation of the proposed method against a range of robust baseline approaches, which are classified into three categories. The first category consists of image-only methods:" + }, + { + "type": "text", + "bbox": [ + 0.529, + 0.215, + 0.923, + 0.26 + ], + "angle": 0, + "content": "- Res-Target [84] leverages ResNet as its backbone to extract visual features exclusively for predicting the sentiment of the specified target." + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.266, + 0.867, + 0.282 + ], + "angle": 0, + "content": "The second category includes text-only approaches:" + }, + { + "type": "text", + "bbox": [ + 0.529, + 0.287, + 0.923, + 0.329 + ], + "angle": 0, + "content": "- MemNet [85] employs a stacked architecture of multiple memory networks to build deep memory networks." + }, + { + "type": "text", + "bbox": [ + 0.529, + 0.331, + 0.922, + 0.375 + ], + "angle": 0, + "content": "- MGAN [86] is based on a multi-grained attention architecture designed to adaptively capture both coarse-grained and fine-grained interactions." + }, + { + "type": "text", + "bbox": [ + 0.529, + 0.375, + 0.922, + 0.419 + ], + "angle": 0, + "content": "- BERT [87] is a powerful pre-trained language model trained using a masked language modeling objective and next sentence prediction." 
+ }, + { + "type": "list", + "bbox": [ + 0.529, + 0.287, + 0.923, + 0.419 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.426, + 0.923, + 0.457 + ], + "angle": 0, + "content": "Finally, this study incorporates the following advanced image-text multimodal approaches:" + }, + { + "type": "text", + "bbox": [ + 0.529, + 0.462, + 0.923, + 0.535 + ], + "angle": 0, + "content": "- MIMN [88] comprises two customized interactive memory networks designed to capture both inter-modal dynamics between different modalities and intra-modal dynamics within each individual modality." + }, + { + "type": "text", + "bbox": [ + 0.529, + 0.535, + 0.923, + 0.593 + ], + "angle": 0, + "content": "- ESAFN [12] is a target-sensitive interaction and fusion network designed to adaptively capture interactive features across modalities while also modeling intra-modality features." + }, + { + "type": "text", + "bbox": [ + 0.529, + 0.593, + 0.923, + 0.651 + ], + "angle": 0, + "content": "- TomBERT [11] utilizes BERT and ResNet as backbone models for encoding textual and visual content, respectively. Cross-modal fusion is accomplished by integrating these features into a BERT encoder." + }, + { + "type": "text", + "bbox": [ + 0.529, + 0.651, + 0.923, + 0.708 + ], + "angle": 0, + "content": "- JML-MASC [44] jointly extracts the specific targets and identifies their sentiment polarity by utilizing a visual de-nosing mechanism and attention-based fusion framework." + }, + { + "type": "text", + "bbox": [ + 0.529, + 0.71, + 0.923, + 0.768 + ], + "angle": 0, + "content": "- EF-CapTrBERT [17] converts visual content into an auxiliary sentence, which is then combined with the input sentence and processed through a BERT encoder for sentiment prediction." 
+ }, + { + "type": "text", + "bbox": [ + 0.529, + 0.768, + 0.923, + 0.811 + ], + "angle": 0, + "content": "- VLP-MABSA [14] is a task-specific pre-trained generative framework for multimodal aspect-based sentiment analysis, built on the BART architecture." + }, + { + "type": "text", + "bbox": [ + 0.529, + 0.811, + 0.923, + 0.869 + ], + "angle": 0, + "content": "- FITE [23] is a translation-based approach, which captures facial features in the image and translates them into a corresponding facial description as an auxiliary sentence for sentiment classification." + }, + { + "type": "text", + "bbox": [ + 0.529, + 0.87, + 0.923, + 0.944 + ], + "angle": 0, + "content": "- CMMT-MASC [15] is a cross-modal multi-task Transformer designed for MASC. Additionally, it employs multimodal gating mechanisms to dynamically regulate the flow of textual and visual information during interactions." + }, + { + "type": "list", + "bbox": [ + 0.529, + 0.462, + 0.923, + 0.944 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.519, + 0.045 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.913, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "9" + }, + { + "type": "table_caption", + "bbox": [ + 0.08, + 0.056, + 0.916, + 0.114 + ], + "angle": 0, + "content": "TABLE 3 Detailed Statistics of Twitter-2015, Twitter-2017, and Political Twitter datasets. The \"#sentence\" refers to the total number of sentences. \"#Avg. Length\" denotes the average length of sentences, while \"#Avg. Aspect\" indicates the average number of aspects in a sentence. \"#Avg. Length of SR\", \"#Avg. Length of IR\", \"#Avg. Length of AC\", \"#Avg. Length of FD\", and \"#Avg. 
Length of AO\" correspond to the average lengths of semantic rationales (SR), impression rationales (IR), aesthetic captions for the entire image, facial descriptions, and aesthetic captions for objects." + }, + { + "type": "table", + "bbox": [ + 0.156, + 0.127, + 0.839, + 0.378 + ], + "angle": 0, + "content": "
LabelTwitter-2015Twitter-2017Political Twitter
TrainDevTestTrainDevTestTrainDevTest
Positive92830331715085154933318570176
Neutral188367060716385175734697823368
Negative368149113416144168887166305
Total31791122103735621176123489021559849
#Sentence210172767417465775875105900407
#Avg. Length16.7216.7417.0516.2116.3716.3816.6216.6716.59
#Avg. Aspect1.511.541.542.042.042.101.741.732.09
#Avg. Length of SR42.542.442.542.642.843.042.742.642.2
#Avg. Length of IR56.756.055.755.556.155.455.956.156.3
#Avg. Length of AC35.935.935.532.532.531.634.034.233.3
#Avg. Length of FD39.238.537.838.938.539.339.038.438.7
#Avg. Length of AO29.129.730.328.929.428.929.129.131.3
" + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.402, + 0.492, + 0.476 + ], + "angle": 0, + "content": "- HIMT [89] is a Transformer framework that incorporates a hierarchical interaction component to model the relationships between specific aspects and the input sentence, as well as the interactions between specific aspects and object-level visual content." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.476, + 0.492, + 0.534 + ], + "angle": 0, + "content": "- IMT [13] is a coarse-to-fine-grained multimodal matching network that predicts image-target relevance and performs object-target alignment to support sentiment polarity identification." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.534, + 0.492, + 0.579 + ], + "angle": 0, + "content": "- CoolNet [19] is a fine-grained cross-modal alignment approach that aligns textual and visual content from both semantic and syntactic perspectives." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.579, + 0.492, + 0.651 + ], + "angle": 0, + "content": "- UnifiedTMSC [90] introduces a descriptive prompt paraphrasing paradigm to generate paraphrased prompts, while optimizing image vectors within the multimodal representation space of vision and language." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.651, + 0.492, + 0.709 + ], + "angle": 0, + "content": "- VEMP [91] decodes the semantic information of visual elements by utilizing textual tokens in the image, target-aware adjective-noun pairs, and image captions." + }, + { + "type": "text", + "bbox": [ + 0.097, + 0.709, + 0.492, + 0.781 + ], + "angle": 0, + "content": "- Atlantis-MASC [22] is a trident-shaped, aesthetic-driven approach for joint MABSA, which integrates image aesthetic attributes and achieves effective alignment of vision and text across multiple granular levels." 
+ }, + { + "type": "text", + "bbox": [ + 0.097, + 0.781, + 0.492, + 0.839 + ], + "angle": 0, + "content": "- MDCA [24] is a generative framework proposed to provide supplementary reasoning and explicit rationales to explain why specific content conveys certain sentiment." + }, + { + "type": "list", + "bbox": [ + 0.097, + 0.402, + 0.492, + 0.839 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.865, + 0.212, + 0.879 + ], + "angle": 0, + "content": "4.3 Main Results" + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.884, + 0.491, + 0.945 + ], + "angle": 0, + "content": "The main results are presented in Table 4. Given that the two additional rationale generation tasks contribute to improving sentiment prediction by providing explanations for the underlying causes of sentiment, we select the prediction" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.402, + 0.923, + 0.534 + ], + "angle": 0, + "content": "results from sentiment classification \\(\\hat{y}^{\\mathrm{sc}}\\) as the primary outcomes for accuracy and F1 score evaluation. As presented in Table 4, the proposed method demonstrates competitive performance on both Twitter datasets compared to strong baselines from both text-only and multimodal approaches. Specifically, it achieves the highest accuracy (81.61%) and F1 score (77.98%) on the Twitter-2015 dataset, as well as the best accuracy (75.62%) and a near-optimal F1 score (74.59%) on the Twitter-2017 dataset." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.535, + 0.923, + 0.945 + ], + "angle": 0, + "content": "Compared to the image-only approach (Res-Target), the proposed method achieves a remarkable improvement of over \\(21.73\\%\\) in accuracy on the Twitter-2015 dataset. Similarly, when compared to the best-performing text-only method (BERT), the proposed method demonstrates a substantial performance gain, with a \\(7.46\\%\\) increase in accuracy and a \\(9.12\\%\\) improvement in F1 on Twitter-2015. 
These observations underscore the limitations of single-modality approaches in capturing subtle sentiment cues from multimodal content. Moreover, the proposed method consistently outperforms recent multimodal models, such as UnifiedTMSC, Atlantis-MASC, and MDCA. For instance, UnifiedTMSC adopts a paraphrasing-based approach to enrich textual features but lacks explicit modeling of visual aesthetic-driven affective impact. On Twitter-2017, the proposed method achieves comparable F1 performance (74.59 vs. 74.70) while delivering higher accuracy (75.62 vs. 75.40), which highlights the complementary benefits of aesthetic affective resonance modeling. While Atlantis-MASC incorporates image aesthetics, it primarily relies on global alignment techniques, which may overlook the intricate relationships between aspects and objects. The proposed method surpasses Atlantis-MASC by \\(1.58\\%\\) in accuracy on Twitter-2017, underscoring the efficacy of its patch-token level and object-level alignment in capturing aspect-specific visual details. While MDCA incorporates reasoning and direct causality to explain sentiment causes, it primarily emphasizes textual semantic reasoning, which restricts its" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.032, + 0.519, + 0.044 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.924, + 0.044 + ], + "angle": 0, + "content": "10" + }, + { + "type": "table_caption", + "bbox": [ + 0.075, + 0.056, + 0.923, + 0.081 + ], + "angle": 0, + "content": "TABLE 4 The main results \\((\\%)\\) are presented with the best-performing results highlighted in bold and the second-best values indicated with underlined text." + }, + { + "type": "table", + "bbox": [ + 0.136, + 0.093, + 0.859, + 0.458 + ], + "angle": 0, + "content": "
ModalityModelVenueTwitter-2015Twitter-2017Political Twitter
AccF1AccF1AccF1
Image OnlyRes-TargetCVPR 201659.8846.4858.5953.9860.2158.42
Text OnlyMemNetEMNLP 201670.1161.7664.1860.90--
MGANEMNLP 201871.1764.2164.7561.4667.3762.78
BERTNAACL 201974.1568.8668.1565.2369.4164.25
Image and TextMIMNAAAI 201971.8465.6965.8862.9970.5265.39
ESAFNTASLP 201973.3867.3767.8364.2269.2264.66
TomBERTIJCAI 201977.1571.1570.3468.0369.6562.35
JML-MASCEMNLP 202178.70-72.70-70.1468.37
EF-CapTrBERTACM MM 202178.0173.2569.7768.4269.0464.94
VLP-MABSAACL 202278.6073.8073.8071.8070.3269.64
CMMT-MASCIPM 202277.90-73.8---
FITEEMNLP 202278.4973.9070.9068.7068.6465.83
HIMTTAC 202278.1473.6871.1469.16--
IMTIJCAI 202278.2774.1972.6171.9769.9267.86
CoolNetIPM 202379.9275.2871.6469.5870.9170.25
UnifiedTMSCEMNLP 202379.8076.3075.4074.70--
VEMPEMNLP 202378.8875.0973.0172.42--
Atlantis-MASCINFFUS 202479.03-74.20-69.8368.97
MDCATNNLS 202480.7177.1573.9172.3771.3870.94
OursChimera-81.6177.9875.6274.5972.5672.32
" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.481, + 0.493, + 0.601 + ], + "angle": 0, + "content": "ability to effectively capture detailed visual content and the corresponding aesthetic affective resonance. In contrast, the proposed method surpasses MDCA with a \\(0.90\\%\\) improvement in accuracy and a \\(0.83\\%\\) increase in F1 on the Twitter-2015 dataset. This performance gain highlights the advantages of comprehensively understanding sentiment causality from both visual-textual semantic and affective resonance perspectives." + }, + { + "type": "title", + "bbox": [ + 0.073, + 0.617, + 0.315, + 0.632 + ], + "angle": 0, + "content": "4.4 Results on Political Twitter" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.636, + 0.491, + 0.739 + ], + "angle": 0, + "content": "The Political Twitter dataset differs significantly from Twitter-2015 and Twitter-2017, especially due to its challenging domain shift between training, development, and test sets. Such domain differences create substantial barriers to generalization, which makes the task particularly suitable for advanced models that can comprehend subtle causality and context shifts." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.753, + 0.493, + 0.945 + ], + "angle": 0, + "content": "From Table 4, it can be observed that the proposed Chimera demonstrates distinct advantages over existing approaches on the Political Twitter dataset. Compared to the third best performing method CoolNet, which achieved \\(71.32\\%\\) accuracy and \\(69.64\\%\\) F1 score, Chimera showcases a significant improvement. Similarly, MDCA, which performed with an accuracy of \\(71.38\\%\\) and an F1 score of \\(70.94\\%\\), still lags behind Chimera. Additionally, we observed that the discrepancy between accuracy and F1-score significantly narrows as accuracy increases, particularly when accuracy surpasses \\(70\\%\\). 
We hypothesize that the underlying cause may lie in the relatively balanced class distribution of sentiment categories (e.g., positive, neutral," + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.481, + 0.925, + 0.587 + ], + "angle": 0, + "content": "negative) within the Political Twitter test set (as shown in Table 3). At higher accuracy levels, the ratios of false positives to false negatives exhibit increasing symmetry across models. This equilibrium consequently reduces the divergence between precision and recall metrics, thereby causing the F1-score - defined as their harmonic mean - to naturally converge with accuracy." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.603, + 0.659, + 0.619 + ], + "angle": 0, + "content": "4.5 Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.622, + 0.923, + 0.709 + ], + "angle": 0, + "content": "To systematically investigate the influence of the linguistic-aware semantic alignment module, including semantic and impression rationale reasoning as well as object-level fine-grained alignment, on sentiment prediction, we conducted a series of ablation studies and the results are shown in Table 5." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.71, + 0.924, + 0.945 + ], + "angle": 0, + "content": "As presented in Table 5, the exclusion of semantic rationale (\"w/o SRG\") results in a noticeable performance decline across all three datasets. This effect is particularly pronounced on the Twitter-2017 and Political Twitter datasets, where nearly all evaluation metrics, including accuracy and F1 score, exhibit a reduction of approximately \\(2\\%\\). Similarly, the absence of impression rationale reasoning (\"w/o IRG\") results in performance fluctuations on the Twitter-2015 and Political Twitter datasets. 
However, the most noticeable effect is observed on the Twitter-2017 dataset, where the model's performance exhibits a significant degradation, particularly in the sentiment classification task, with nearly a \\(4\\%\\) drop in both accuracy and F1 score. The results (\"w/o IRG & AC\") reveal consistent performance degradation in both Accuracy and F1-score across all three datasets. Particularly noteworthy" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.519, + 0.045 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.921, + 0.043 + ], + "angle": 0, + "content": "11" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.056, + 0.916, + 0.126 + ], + "angle": 0, + "content": "TABLE 5 The results \\((\\%)\\) of the ablation study for our Chimera model are presented. The top-performing values emphasized in bold and the second-best values distinguished using underlined text. The notations \"w/o SRG,\" \"w/o IRG,\" and \"w/o SRG & IRG\" denote the exclusion of the respective generative tasks. \"w/o IRG & AC\" refers to the removal of IR generation task and replace the aesthetic caption (AC) with general caption. \"w/o LSA\" represents the removal of the Linguistic-aware Semantic Alignment branch, while \"w/o OD\" indicates the exclusion of object-level descriptions (e.g., facial descriptions and object-level aesthetic captions) from the input sequence." + }, + { + "type": "table", + "bbox": [ + 0.076, + 0.139, + 0.925, + 0.272 + ], + "angle": 0, + "content": "
MethodTwitter-2015Twitter-2017Political Twitter
AccF1AccF1AccF1AccF1AccF1AccF1AccF1AccF1AccF1
SCSRGIRGSCSRGIRGSCSRGIRG
Chimera81.6177.9881.1277.1177.5673.5575.6274.5975.0973.6471.9668.2372.5672.3271.6971.4069.3068.95
w/o SRG80.5276.10--75.8370.9673.5072.49--70.6667.2070.4369.88--68.2567.58
w/o IRG80.2375.2280.0375.42--71.8870.1672.670.73--71.1570.7071.0170.52--
w/o IRG & AC80.6776.0380.1176.46--71.5969.8372.2570.33--70.6270.0671.0470.47--
w/o SRG & IRG77.2471.82----71.2368.98----67.8867.20----
w/o LSA80.5477.0379.7576.2276.5272.0373.7270.9674.3872.2671.3667.8871.8671.3770.9270.5568.4367.99
w/o OD79.9676.0880.0976.3277.1272.8473.0670.8574.3772.3671.1167.5371.6471.1271.1270.7768.5568.07
w/o Aes-cap80.0375.2779.9476.0575.6971.0872.3671.6472.2871.2169.2865.4469.4368.9469.3769.0067.8567.27
" + }, + { + "type": "image", + "bbox": [ + 0.075, + 0.283, + 0.357, + 0.414 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.368, + 0.283, + 0.64, + 0.414 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.652, + 0.283, + 0.924, + 0.414 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.074, + 0.429, + 0.359, + 0.552 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.369, + 0.428, + 0.64, + 0.551 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.653, + 0.43, + 0.924, + 0.551 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.074, + 0.566, + 0.373, + 0.58 + ], + "angle": 0, + "content": "Fig. 2. Results \\((\\%)\\) on hyper-parameter of \\(\\alpha\\) and \\(\\lambda\\)." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.604, + 0.493, + 0.939 + ], + "angle": 0, + "content": "is the model's inferior performance on Twitter-2017 and Political Twitter datasets compared to the baseline(w/o IRG). However, an unexpected performance improvement emerges in Twitter-2015, surpassing even the configuration retaining aesthetic captions as input. This phenomenon may be attributed to dataset-specific characteristics in sample distribution. As detailed in Table 3, Twitter-2015 exhibits a significantly higher proportion of neutral-class samples compared to Twitter-2017 and Political Twitter. When the Chimera model is deprived of its reasoning abilities for both semantic and impression rationales (\"w/o SRG & IRG\"), its performance on sentiment classification declines to the lowest levels across all datasets. Specifically, a consistent reduction of approximately \\(4 - 5\\%\\) is observed in nearly all metrics, underscoring the essential role of rationale-based reasoning in enhancing the effectiveness and accuracy of sentiment analysis tasks. 
These results show that the influence of rationale reasoning differs across datasets. For Twitter-2017, with its balanced sentiment distribution (see Table 3), impression rationale has a greater impact on sentiment analysis. In contrast, both semantic and impression rationales contribute to the other two datasets, but neither is dominant." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.62, + 0.925, + 0.81 + ], + "angle": 0, + "content": "The LSA branch plays a pivotal role in the Chimera model by bridging the semantic gap between textual and visual modalities, ensuring effective alignment of information across visual and textual data. Its removal (w/o LSA) consistently leads to a significant decline in performance across all datasets, as evident in the ablation study. For instance, on Twitter-2015, the accuracy drops from \\(81.61\\%\\) to \\(80.54\\%\\), and the F1 score decreases from \\(77.98\\%\\) to \\(77.03\\%\\). Similarly, for Twitter-2017, accuracy, and F1 score dropped to \\(73.72\\%\\) and \\(70.96\\%\\), respectively. By aligning linguistic and visual features, the branch allows the model to effectively interpret semantic overlaps and contrasts, enabling more accurate sentiment predictions." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.812, + 0.925, + 0.945 + ], + "angle": 0, + "content": "Object-level descriptions (e.g., facial expressions and object-level aesthetic captions) enrich the input sequence by providing object-level detailed visual context. The ablation study reveals that removing OD (w/o OD) causes noticeable performance drops. On Twitter-2015, accuracy drops by 1.65 percentage points, and the F1 score decreases by 1.90 percentage points. Similarly, on Twitter-2017, accuracy is reduced by 2.56 percentage points, while the F1 score drops by 3.74 percentage points. 
Without the OD, the model" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.518, + 0.045 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.922, + 0.043 + ], + "angle": 0, + "content": "12" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.054, + 0.493, + 0.259 + ], + "angle": 0, + "content": "loses access to these fine-grained visual features, leading to diminished interpretability and accuracy, particularly in datasets where visual information plays a crucial role in determining sentiment. Additionally, the aesthetic caption is excluded from the input sequence to assess its impact on performance (w/o Aes-cap). As demonstrated in Table 5, the absence of aesthetic features results in a noteworthy decline in performance across all datasets, particularly in the impression rationale generation (IRG) task. This leads to Chimera exhibiting the poorest sentiment classification performance for IRG on the Twitter-2017 and Political Twitter datasets, which underscore the importance of aesthetic captions in guiding the model to generate coherent and emotionally nuanced impressions." + }, + { + "type": "title", + "bbox": [ + 0.073, + 0.273, + 0.308, + 0.287 + ], + "angle": 0, + "content": "4.6 Hyper-parameter Analysis" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.291, + 0.493, + 0.73 + ], + "angle": 0, + "content": "We conduct a hyperparameter analysis to explore the impact of \\(\\alpha\\) and \\(\\lambda\\) on the Chimera model's performance across the Twitter-2015, Twitter-2017, and Political Twitter datasets. Hyperparameter \\(\\alpha\\) regulates the balance between sentiment classification (SC) and rationale generation components (semantic and impression rationales, SRG, and IRG), while \\(\\lambda\\) controls the weight of patch-token alignment within the overall loss function. 
As shown in Figure 2, for all datasets, a lower \\(\\alpha\\), which assigns greater weight to rationale generation, generally improves model performance, with values around 0.1 to 0.2 achieving the highest accuracy and F1 scores. This emphasizes the significance of integrating semantic and impression rationales in MASC. As \\(\\alpha\\) increases, favoring SC loss, performance plateaus or declines, particularly for the Political Twitter dataset, indicating that reduced emphasis on rationale generation diminishes the model's ability to capture fine-grained sentiment context effectively. Moreover, the results indicate that increasing \\(\\lambda\\) initially enhances model performance, with diminishing returns beyond a certain threshold. For the Twitter-2015 and Political Twitter datasets, moderate \\(\\lambda\\) values [0.2, 0.5] achieve optimal accuracy and F1 scores, while higher values (\\(\\lambda > 0.6\\)) lead to performance stabilization or slight decline. This observation indicates that balanced alignment between visual and textual features enhances the model's interpretability and accuracy and excessively high \\(\\lambda\\) values may negatively impact performance, likely due to overemphasis on alignment at the expense of core sentiment classification. For Twitter-2017, a similar trend is observed, although performance variations are less pronounced." + }, + { + "type": "title", + "bbox": [ + 0.074, + 0.745, + 0.271, + 0.759 + ], + "angle": 0, + "content": "5 IN-DEPTH ANALYSIS" + }, + { + "type": "title", + "bbox": [ + 0.073, + 0.764, + 0.331, + 0.779 + ], + "angle": 0, + "content": "5.1 Quality Analysis of Rationale" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.782, + 0.491, + 0.943 + ], + "angle": 0, + "content": "Table 6 provides an evaluation of the sentiment rationale quality for both the ground-truth and Chimera-generated content, aiming to analyze their impact on sentiment analysis. 
A pre-trained sentiment classification model [92] is employed to assess the intuitive sentiment quality of these rationales across three test datasets by inputting the rationales into the model and analyzing the sentiment predictions. For both SR and IR, the results in the GroundTruth row represent the upper performance bound. It is evident that the ground truth performance for SR significantly exceeds that of IR, indicating that semantic rationales" + }, + { + "type": "table_caption", + "bbox": [ + 0.52, + 0.055, + 0.909, + 0.091 + ], + "angle": 0, + "content": "TABLE 6 The evaluation results \\((\\%)\\) of rationale quality. The best-performing results highlighted in bold." + }, + { + "type": "table", + "bbox": [ + 0.508, + 0.104, + 0.921, + 0.248 + ], + "angle": 0, + "content": "
Rationale SourceTwitter-2015Twitter-2017Political
AccF1AccF1AccF1
Semantic Rationale
Ground-Truth99.0499.0498.5498.5497.6497.64
Chimera80.9180.8375.0474.9370.2070.14
Impression Rationale
Ground-Truth69.9169.9072.7772.7176.876.87
Chimera63.4563.6561.6759.3860.5460.12
" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.264, + 0.921, + 0.42 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.505, + 0.434, + 0.921, + 0.448 + ], + "angle": 0, + "content": "Fig. 3. Human evaluation of factuality, clarity and fluency for SR and IR." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.475, + 0.923, + 0.693 + ], + "angle": 0, + "content": "are more critical for this task than impression rationales. We hypothesize that two factors contribute to this discrepancy. Firstly, as illustrated in Table 3, semantic rationales are shorter in length and straightforward, facilitating easy comprehension, while the emotions elicited by images are inherently more abstract and multifaceted. Secondly, the IR's reliance on visual cues contrasts sharply with the Twitter dataset's text-centric sentiment distribution. Prior research has shown that a considerable majority of targets (around \\(58\\%\\)) are absent from images [13], and most targets (93% in Twitter-2015) exhibit emotional coherence with their textual counterparts [93]. This misalignment underscores the dataset's limitations in evaluating IRs and necessitates a nuanced understanding of the interplay between visual and textual sentiment representations." + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.695, + 0.923, + 0.913 + ], + "angle": 0, + "content": "A total of 180 samples were randomly selected for human evaluation, with 100 samples drawn from the training set, 40 from the testing set, and 40 from the validation set of both the Twitter-2015 and Twitter-2017 datasets. 
Four native English speakers with Master's degrees in the arts were recruited to assess the quality of the rationale data based on three criteria: (1) factuality, evaluating whether the rationale is grounded in accurate and verifiable information; (2) clarity, assessing the logical structure and comprehensibility of the rationale; and (3) fluency, measuring the grammatical accuracy and smoothness of the language used. The Fleiss' Kappa \\((\\kappa)\\) values for the initial evaluation across the four raters were as follows: factuality \\(\\kappa = 0.922\\), clarity \\(\\kappa = 0.945\\), and fluency \\(\\kappa = 0.960\\). In cases of disagreement, the evaluators engaged in discussions to reach a consensus." + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.914, + 0.924, + 0.944 + ], + "angle": 0, + "content": "Figure 3 presents the results of the human evaluation. It can be observed that SR consistently exhibits higher quality" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.519, + 0.045 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "13" + }, + { + "type": "image", + "bbox": [ + 0.073, + 0.052, + 0.925, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.073, + 0.306, + 0.738, + 0.321 + ], + "angle": 0, + "content": "Fig. 4. Assessment of sentiment intensity for SR and IR in both ground truth data and Chimera-generated content." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.344, + 0.494, + 0.492 + ], + "angle": 0, + "content": "across all metrics, which verifies that the employed LLM is capable of generating appropriate rationale data for specific tasks when provided with concrete ground-truth labels. In comparison to SR, IR demands a more in-depth understanding of visual content and is inherently more subjective. 
Consequently, IR is more prone to issues of factuality and clarity, as interpreting the abstract aesthetic and emotional elements conveyed by an image often involves subjective reasoning, which may lead to misalignment with objective ground truths or human expectations." + }, + { + "type": "title", + "bbox": [ + 0.073, + 0.502, + 0.37, + 0.517 + ], + "angle": 0, + "content": "5.2 Quantitative Analysis of Rationale" + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.52, + 0.491, + 0.681 + ], + "angle": 0, + "content": "We conduct a quantitative analysis on the test sets of ground truth and Chimera-generated content to examine the impact of varying levels of sentiment intensity in cognitive rationales on the accuracy of sentiment prediction, including their potential to amplify or diminish predictive performance. As illustrated in Figure 4, the sentiment intensity distributions of Twitter-2015 and Twitter-2017 reveal distinct patterns. Specifically, the sentiment intensity of IR demonstrates a noticeable bias toward positive values, whereas the sentiment intensity of SR aligns more closely with the sentiment polarity label distribution presented in Table 3." + }, + { + "type": "text", + "bbox": [ + 0.072, + 0.681, + 0.493, + 0.945 + ], + "angle": 0, + "content": "This observation suggests that IR demonstrates a bias toward positive samples, increasing the model's confidence in predicting positive instances. While this bias may be beneficial for datasets with a higher proportion of positive samples (e.g., Twitter-2017), it could lead to additional bias in datasets with a limited representation of positive samples. This finding is further corroborated by the ablation study results, which reveal that the performance of the Chimera model without IR is worse on Twitter-2017 compared to its performance on Twitter-2015. 
Another notable observation is that, for the ground truth of the Political Twitter dataset, the sentiment intensity distribution of IR is relatively uniform across all ranges. In contrast, the Chimera-generated content for IR exhibits a more distinguishable sentiment intensity distribution compared to the ground truth, which further validates the quality of SR, the effectiveness of the proposed Chimera training paradigm, and the robustness of Chimera's performance." + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.344, + 0.925, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.504, + 0.538, + 0.924, + 0.565 + ], + "angle": 0, + "content": "Fig. 5. Visualization of the top 15 most frequent aesthetic-related words in generated IR." + }, + { + "type": "title", + "bbox": [ + 0.504, + 0.588, + 0.873, + 0.604 + ], + "angle": 0, + "content": "5.3 Impact of Aesthetic Attributes on Sentiment" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.607, + 0.925, + 0.945 + ], + "angle": 0, + "content": "To investigate the impact of image aesthetic attributes on sentiment analysis, we visualize the frequency of aesthetic-related words within the impression rationales generated by our proposed Chimera model and its variant \"Chimera w/o Aes-cap\" on the Twitter-2015 and Twitter-2017 test sets. Specifically, we visualize the top 15 most frequent aesthetic-related words within the generated IR, based on the aesthetic attributes defined by Milena et al. [94]. As shown in Figure 5, the frequency analysis of aesthetic-related words for Chimera on Twitter-2015 and Twitter-2017 reveals that \"visual,\" \"vibrant,\" \"focus,\" and \"design\" prominently appear across both datasets. These terms, associated with visual clarity, expressive quality, image composition, and cohesiveness, align with the model's improved accuracy and F1 scores. 
However, excluding the aesthetic caption from the input results in subtle shifts in the frequency distribution of these aesthetic-related terms. For Twitter-2015, the overall frequency distribution of aesthetic-related terms shows minimal change, with a slight increase in \"focus\" and a decrease in \"vibrant\". In contrast, for Twitter-2017, \"focus\" shows a significant increase, while \"visual\" and \"vibrant\" decrease notably. Combined with the ablation study results in Table 5, the absence of aesthetic captions in the input" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.519, + 0.045 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "14" + }, + { + "type": "table", + "bbox": [ + 0.097, + 0.055, + 0.903, + 0.695 + ], + "angle": 0, + "content": "
Input image-text pair & Auxiliary sentencesMethodSemantic Rationale (SR of GT, Chimera and ChatGPT-4o) Reasoning Cause(RC of MDCA)Impression Rationale (IR of GT and Chimera)/ Direct Cause(DC of MDCA)Sentiment
RT @ MamiHerald : [Joanne Stiger]POS and Melissa Keller are the first same - sex couple to be married in Broward :Ground Truth (GT)Based on the image-text pair, the sentiment towards Joanne Stiger in the given text and image is positive, because the text highlights a historic and celebratory event of her marriage, and the image shows people smiling and appearing joyful, indicating happiness and approval.The image emanates a joyous and celebratory atmosphere, highlighted by the smiling faces and relaxed postures of the individuals present. The presence of photographers and the gathering of people around the couple enhance the sense of an important and positive milestone being reached. Overall, the visual conveys happiness, love, and a significant moment being warmly embraced by all involved.Positive
ChimeraThe sentiment towards Joanne Stiger in the given text and image is positive, because the text highlights her as the first same-sex couple to be married in Broward, and the image shows her smiling and looking happy, which conveys a sense of pride and joy.The image depicts two individuals who appear relaxed and happy, with one person appearing content and relaxed. The overall impression is one of happiness and contentment. The setting and body language suggest a sense of happiness, together with a pleasant and celebratory atmosphere.Positive
Aesthetic caption: The body language of the two people shows that they are in a very different way. The background shows a room with a lot of people and the cameraman is in the background.MDCABecause the tweet simply reports on a factual event without expressing any emotional response towards Joanne Stiger. The use of the word "first" suggests that the author is excited about the news and is highlighting the fact that Stiger and Keller are the first same-sex couple to be married in Broward. Overall, the tweet does not provide enough information to determine a positive or negative sentiment towards JoAnn Stiger herself.Because joanne stiger and Melissa keller are the first same - sex couple to be married in bronward.Neutral
GPT-4oBased on the image-text pair, the sentiment towards Joanne Stiger in the given text and image is Positive, because both the faces of Joanne Stiger and Melissa Keller, as well as the individuals around them, display expressions of happiness and joy, indicating a celebratory and momentous occasion.Positive
Primary Football Cup . [St Bede]' s v St Augustines friendly . . . both out but now for the friendly # sportsmanshipGround TruthBased on the image-text pair, the sentiment towards St Bede in the given text and image is neutral, because the text describes the event as a friendly match with a focus on sportsmanship, and the image shows a regular football game without any emotional or evaluative expressions towards St Bede.The image depicts a friendly football match between St Bede's and St Augustine's on a lush green field under a partly cloudy sky. The overall scene conveys a sense of camaraderie and sportsmanship, aligning well with the neutral sentiment label. The focus appears to be on enjoying the game and fostering friendly competition in a relaxed atmosphere.Neutral
ChimeraThe sentiment towards St Bede in the given text and image is neutral, because the text simply states that St Bede is participating in a friendly match without expressing any positive or negative emotions, and the image shows a standard football match scene without any clear emotional cues.The image of the St Bede vs. St Augustines football match conveys a sense of camaraderie and sportsmanship. The players are engaged in a moment of play, suggesting a positive and supportive atmosphere. The neutral sentiment label aligns with the overall feeling of cambered athleticism and teamwork, without evoking strong emotions.Neutral
Aesthetic caption: The position of the camera captures the players in action, as they are engaged in a game of soccer. the position of the camera captures the players in action, and the position of the camera captures the players in action.MDCABecause the writer is promoting the team's friendly football match with St. Albers, which suggests that they are a fan of the team. The use of the hashtag #sportsmanship also suggests that the writer has a positive opinion of St. Bede. Overall, the language and tone of the tweet suggest that the sentiment towards St. Bede is positive.It is a friendly football cup.Positive
GPT-4oBased on the image-text pair, the sentiment towards St Bede in the given text and image is Positive, because the text highlights a friendly and sportsmanlike attitude despite both teams being out of the competition, and the image shows players engaging in the match, fostering a positive environment.Positive
[Michael Oer]NEG is latest reminder that NFL pain lasts 24 / 7 / 365Ground TruthBased on the image-text pair, the sentiment towards Michael Oer in the given text and image is negative, because the text highlights the constant and enduring pain associated with his NFL career, implying a prolonged and difficult experience.The image of Michael Oer in his NFL uniform, combined with the sentiment label "negative", conveys a sense of enduring struggle and fatigue. His posture and facial expression suggest a feeling of exhaustion or frustration, which aligns with the text's implication that the pain and challenges of an NFL career are relentless and unforgiving. The overall impression is one of continuous physical and emotional burden.Negative
ChimeraThe sentiment towards Michael Oer in the given text and image is negative, because the text highlights that his recent reminder that NFL pain lasts 24/7/365, which is a serious and distressing reminder.The image of Michael Oer exudes a sense of determination and focus, aligning with the negative sentiment label. His serious expression and the context of NFL pain underscore a feeling of exhaustion and exhaustion, which complements the idea of prolonged pain and the accompanying reminder that "no matter how much you love it," evoking a negative feeling.Negative
MDCABecause the tweet does not express any clear positive or negative opinion towards him. The tweet simply states that he is a reminder that NFL pain lasts for a specific time period. There is no indication of any emotional attachment or opinion towards Michael Oer in the tweet, and the language used is informative rather than emotional. Therefore, the sentiment towards Michael Oer in this tweet is neutral.He is reminder that NFL pain lasts 24 / 7 / 365.Neutral
GPT-4oBased on the image-text pair, the sentiment towards Michael Oer in the given text and image is Negative, because the text highlights the unending pain associated with NFL careers, using Michael Oer as an example, and alludes to the physical and possibly emotional toll he endures.Negative
" + }, + { + "type": "image_caption", + "bbox": [ + 0.073, + 0.71, + 0.924, + 0.747 + ], + "angle": 0, + "content": "Fig. 6. Three examples showcasing the predictions generated by Chimera, MDCA, and GPT-4o are presented for analysis. During the evaluation process, GPT-4o exclusively produces the semantic rationale (SR). The input image-text pair and auxiliary sentences are utilized solely by Chimera. For MDCA, the reasoning cause (RC), direct cause (DC), and sentiment prediction are derived through direct inference." + }, + { + "type": "text", + "bbox": [ + 0.073, + 0.771, + 0.492, + 0.933 + ], + "angle": 0, + "content": "leads to the worst sentiment analysis performance across all datasets on IRG. This highlights the critical role of aesthetic captions in enhancing the model's understanding of image aesthetics, particularly in datasets like Twitter-2017 with balanced sentiment distributions. Specifically, attributes such as \"visual\" and \"vibrant\" positively contribute to sentiment analysis performance, whereas \"focus\" appears to significantly impair it. We speculate that since \"focus\" emphasizes specific image elements, potentially leads to an unbalanced interpretation of visual content. This localized emphasis can narrow the model's analytical scope, prioritizing details at" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.771, + 0.923, + 0.83 + ], + "angle": 0, + "content": "the expense of broader context and compositional harmony. Consequently, the model may struggle to capture holistic aesthetic and emotional cues essential for accurate sentiment classification." + }, + { + "type": "title", + "bbox": [ + 0.505, + 0.85, + 0.857, + 0.866 + ], + "angle": 0, + "content": "5.4 Comparison with Large Language Models" + }, + { + "type": "text", + "bbox": [ + 0.504, + 0.87, + 0.924, + 0.943 + ], + "angle": 0, + "content": "We evaluate the performance of GPT-4o on the MASC task under a zero-shot setting. 
As shown in Table 7, GPT-4o achieves an accuracy of \\(46.87\\%\\) and an F1 score of \\(47.47\\%\\), which is substantially lower than Chimera, which reports \\(81.61\\%\\) accuracy and \\(77.98\\%\\) F1 score. On the Twitter-2017" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.077, + 0.034, + 0.518, + 0.044 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "15" + }, + { + "type": "table_caption", + "bbox": [ + 0.078, + 0.056, + 0.488, + 0.124 + ], + "angle": 0, + "content": "TABLE 7 The experimental results \\((\\%)\\) of GPT-4o on the MASC task under a zero-shot setting are presented. The best-performing results highlighted in bold. The term \"dis\" refers to the percentage of samples where the sentiment polarity associated with a specific aspect cannot be discerned." + }, + { + "type": "table", + "bbox": [ + 0.078, + 0.136, + 0.488, + 0.23 + ], + "angle": 0, + "content": "
MethodTwitter-2015Twitter-2017
AccF1DisAccF1Dis
Chimera81.6177.98-75.6274.59-
GPT-4o46.8747.470.256.0853.280.5
GPT-4o w/o image67.0262.38-59.6460.35-
" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.274, + 0.49, + 0.623 + ], + "angle": 0, + "content": "dataset, GPT-4o shows an improvement with an accuracy of \\(56.08\\%\\) and an F1 score of \\(53.28\\%\\). However, this performance still trails behind Chimera, which reports \\(75.62\\%\\) accuracy and \\(74.59\\%\\) F1 score. Surprisingly, removing the image input results in an improvement in the model's accuracy and F1 score, reaching \\(67.02\\%\\) and \\(62.38\\%\\) on the Twitter-2015 dataset, respectively. This observation contrasts sharply with the phenomenon observed in the baseline model. Similarly, in the Twitter-2017 dataset, the performance of GPT-4o without image input is slightly better than with the image input. We speculate that in task-specific models, incorporating image data typically improves sentiment classification performance, as these models are finetuned to leverage multi-modal inputs effectively. However, in a zero-shot setting, GPT-4o operates based on its general pre-trained knowledge, which may not be fully optimized for combining textual and visual inputs for sentiment analysis. In this setting, adding image input may introduce noise rather than meaningful information. Moreover, GPT-4o has a low Dis value on both datasets, which slightly decreases to 0 when the image input is removed. This further suggests that the model's ability to distinguish sentiment polarity is, to a certain extent, influenced by the inclusion of the visual modality." + }, + { + "type": "title", + "bbox": [ + 0.077, + 0.646, + 0.2, + 0.661 + ], + "angle": 0, + "content": "5.5 Case Study" + }, + { + "type": "text", + "bbox": [ + 0.078, + 0.666, + 0.49, + 0.942 + ], + "angle": 0, + "content": "An additional case study is performed to provide a more comprehensive evaluation of the effectiveness of the proposed Chimera model. Figure 6 illustrates three representative examples, each corresponding to positive, neutral, and negative samples, respectively. 
As illustrated in the first example, MDCA is the sole model to predict \"Neutral\" for the target \"Joanne Stiger,\" whereas the other three models accurately predict \"Positive\". This result is primarily due to the RC and DC generated by MDCA, which lack the expression of positive or negative sentiment. Notably, the RC predominantly emphasizes the textual content, overlooking the joyful atmosphere conveyed through the image. In the second example, an intriguing observation is that the situation is the exact opposite of the previous case. Here, only Chimera correctly predicts the sentiment polarity of the specific target, \"St. Bede\" as \"Neutral\" whereas both GPT-4o and MDCA incorrectly classify it as \"Positive\". It is observed that the SR of GPT-4o and the RC of MDCA both convey a positive sentiment, largely due" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.055, + 0.921, + 0.287 + ], + "angle": 0, + "content": "to an overinterpretation and extrapolation of the textual content. In contrast, Chimera demonstrates accurate prediction by appropriately integrating a balanced understanding of the image content and its aesthetic attributes. In the final example, both Chimera and GPT-4o accurately identify the sentiment polarity of \"Michael Oher\" as \"Negative\". MDCA's incorrect prediction of \"Neutral\" may be attributed to its generated RC and DC failing to account for the individual's expression, thereby overlooking critical semantic cues present in the visual content. With the aid of facial descriptions, Chimera effectively captures and aligns fine-grained emotional cues from visual content, enabling it to generate coherent SR and IR and achieve accurate predictions. The above representative instances further verify that incorporating cognitive and aesthetic sentiment causality enhances sentiment classification accuracy in MABSA." 
+ }, + { + "type": "title", + "bbox": [ + 0.509, + 0.312, + 0.645, + 0.327 + ], + "angle": 0, + "content": "6 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.334, + 0.921, + 0.61 + ], + "angle": 0, + "content": "In this paper, we propose a cognitive sentiment causality understanding framework tailored for multimodal aspect-based sentiment classification. The framework, which is novel in its approach, consists of four primary components: linguistic-aware semantic alignment, a translation module, rationale dataset construction, and rationale-aware learning. The linguistic-aware semantic alignment component facilitates visual patch-token level alignment through dynamic patch selection and semantic patch calibration. The translation module transforms holistic image and object-level visual information into corresponding emotion-laden textual representations. The rationale dataset construction involves designing refined prompts and leveraging LLMs to generate semantic and impression rationale. Finally, rationale-aware learning incorporates semantic explanations and affective-cognitive resonance to enhance the model's capacity to understand cognitive sentiment causality. Experimental results on three Twitter datasets demonstrate that the proposed Chimera achieves performance gains over SOTA baselines." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.636, + 0.68, + 0.65 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.658, + 0.921, + 0.847 + ], + "angle": 0, + "content": "This research is supported by the Shanghai Science and Technology Innovation Action Plan (No. 24YF2710100), the Shanghai Special Project to Promote High-quality Industrial Development (No. RZ-CYAI-01-24-0288), the National Nature Science Foundation of China (No. 62477010), the Science and Technology Commission of Shanghai Municipality Grant (No. 22511105901, No. 
21511100402), the Ministry of Education, Singapore under its MOE Academic Research Fund Tier 2 (STEM RIE2025 Award MOE-T2EP20123-0005) and by the RIE2025 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) (Award I2301E0026), administered by A\\*STAR, as well as supported by Alibaba Group and NTU Singapore." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.872, + 0.619, + 0.886 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.509, + 0.896, + 0.922, + 0.942 + ], + "angle": 0, + "content": "[1] R. Mao, Q. Liu, K. He, W. Li, and E. Cambria, \"The biases of pretrained language models: An empirical study on prompt-based sentiment analysis and emotion detection,\" IEEE Transactions on Affective Computing, vol. 14, no. 3, pp. 1743-1753, 2023." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.519, + 0.045 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "16" + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.055, + 0.492, + 0.09 + ], + "angle": 0, + "content": "[2] K. Du, F. Xing, R. Mao, and E. Cambria, \"Financial sentiment analysis: Techniques and applications,\" ACM Computing Surveys, vol. 56, no. 9, pp. 1-42, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.091, + 0.492, + 0.125 + ], + "angle": 0, + "content": "[3] R. Mao, M. Ge, S. Han, W. Li, K. He, L. Zhu, and E. Cambria, \"A survey on pragmatic processing techniques,\" Information Fusion, vol. 114, p. 102712, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.126, + 0.492, + 0.171 + ], + "angle": 0, + "content": "[4] L. Xiao, Y. Xue, H. Wang, X. Hu, D. Gu, and Y. Zhu, \"Exploring fine-grained syntactic information for aspect-based sentiment classification with dual graph neural networks,\" Neurocomputing, vol. 471, pp. 48-59, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.171, + 0.492, + 0.205 + ], + "angle": 0, + "content": "[5] Y. Ma, R. Mao, Q. Lin, P. Wu, and E. Cambria, \"Quantitative stock portfolio optimization by multi-task learning risk and return,\" Information Fusion, vol. 104, p. 102165, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.206, + 0.492, + 0.251 + ], + "angle": 0, + "content": "[6] K. Du, F. Xing, R. Mao, and E. Cambria, \"FinSenticNet: A concept-level lexicon for financial sentiment analysis,\" in 2023 IEEE Symposium Series on Computational Intelligence (SSCI). IEEE, 2023, pp. 109-114." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.252, + 0.492, + 0.298 + ], + "angle": 0, + "content": "[7] X. Zhang, R. Mao, and E. Cambria, \"SenticVec: Toward robust and human-centric neurosymbolic sentiment analysis,\" in Findings of the Association for Computational Linguistics: ACL. Association for Computational Linguistics, 2024, pp. 4851-4863." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.299, + 0.492, + 0.344 + ], + "angle": 0, + "content": "[8] S. Zhao, M. Jia, L. A. Tuan, F. Pan, and J. Wen, \"Universal vulnerabilities in large language models: Backdoor attacks for incontext learning,\" in Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, 2024, pp. 11507-11522." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.344, + 0.492, + 0.389 + ], + "angle": 0, + "content": "[9] L. Zhu, R. Mao, E. Cambria, and B. J. Jansen, \"Neurosymbolic AI for personalized sentiment analysis,\" in Proceedings of International Conference on Human-Computer Interaction (HCII), 2024, pp. 269-290." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.39, + 0.492, + 0.436 + ], + "angle": 0, + "content": "[10] S. Zhao, J. Wen, A. Luu, J. Zhao, and J. 
Fu, \"Prompt as triggers for backdoor attack: Examining the vulnerability in language models,\" in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, 2023, pp. 12303-12317." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.436, + 0.492, + 0.481 + ], + "angle": 0, + "content": "[11] J. YU and J. JIANG, \"Adapting bert for target-oriented multimodal sentiment classification.(2019),\" in Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, 2019, pp. 5408-5414." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.482, + 0.492, + 0.528 + ], + "angle": 0, + "content": "[12] J. Yu, J. Jiang, and R. Xia, \"Entity-sensitive attention and fusion network for entity-level multimodal sentiment classification,\" IEEE/ACM Transactions on Audio, Speech, and Language Processing, vol. 28, pp. 429-439, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.529, + 0.492, + 0.563 + ], + "angle": 0, + "content": "[13] J. Yu, J. Wang, R. Xia, and J. Li, \"Targeted multimodal sentiment classification based on coarse-to-fine grained image-target matching.\" in *IJCAI*, 2022, pp. 4482-4488." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.564, + 0.492, + 0.609 + ], + "angle": 0, + "content": "[14] Y. Ling, J. Yu, and R. Xia, \"Vision-language pre-training for multimodal aspect-based sentiment analysis,\" in Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2022, pp. 2149-2159." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.609, + 0.492, + 0.643 + ], + "angle": 0, + "content": "[15] L. Yang, J.-C. Na, and J. Yu, \"Cross-modal multitask transformer for end-to-end multimodal aspect-based sentiment analysis,\" Information Processing & Management, vol. 59, no. 5, p. 103038, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.643, + 0.492, + 0.689 + ], + "angle": 0, + "content": "[16] R. Zhou, W. Guo, X. Liu, S. 
Yu, Y. Zhang, and X. Yuan, \"Aom: Detecting aspect-oriented information for multimodal aspect-based sentiment analysis,\" in Findings of the Association for Computational Linguistics: ACL 2023, 2023, pp. 8184-8196." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.689, + 0.492, + 0.734 + ], + "angle": 0, + "content": "[17] Z. Khan and Y. Fu, \"Exploiting bert for multimodal target sentiment classification through input space translation,\" in Proceedings of the 29th ACM international conference on multimedia, 2021, pp. 3034-3042." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.735, + 0.492, + 0.781 + ], + "angle": 0, + "content": "[18] L. Xiao, E. Zhou, X. Wu, S. Yang, T. Ma, and L. He, \"Adaptive multi-feature extraction graph convolutional networks for multimodal target sentiment analysis,\" in 2022 IEEE International Conference on Multimedia and Expo (ICME). IEEE, 2022, pp. 1-6." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.781, + 0.492, + 0.827 + ], + "angle": 0, + "content": "[19] L. Xiao, X. Wu, S. Yang, J. Xu, J. Zhou, and L. He, \"Cross-modal fine-grained alignment and fusion network for multimodal aspect-based sentiment analysis,\" Information Processing & Management, vol. 60, no. 6, p. 103508, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.828, + 0.492, + 0.873 + ], + "angle": 0, + "content": "[20] Y. Huang, Z. Chen, J. Chen, J. Z. Pan, Z. Yao, and W. Zhang, \"Target-oriented sentiment classification with sequential cross-modal semantic graph,\" in International Conference on Artificial Neural Networks. Springer, 2023, pp. 587-599." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.873, + 0.492, + 0.919 + ], + "angle": 0, + "content": "[21] Q. Wang, H. Xu, Z. Wen, B. Liang, M. Yang, B. Qin, and R. Xu, \"Image-to-text conversion and aspect-oriented filtration for multimodal aspect-based sentiment analysis,\" IEEE Transactions on Affective Computing, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.919, + 0.492, + 0.943 + ], + "angle": 0, + "content": "[22] L. Xiao, X. Wu, J. Xu, W. Li, C. Jin, and L. He, \"Atlantis: Aesthetic-oriented multiple granularities fusion network for joint multi-" + }, + { + "type": "list", + "bbox": [ + 0.077, + 0.055, + 0.492, + 0.943 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.536, + 0.056, + 0.923, + 0.08 + ], + "angle": 0, + "content": "modal aspect-based sentiment analysis,\" Information Fusion, vol. 106, p. 102304, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.08, + 0.923, + 0.126 + ], + "angle": 0, + "content": "[23] H. Yang, Y. Zhao, and B. Qin, \"Face-sensitive image-to-emotional-text cross-modal translation for multimodal aspect-based sentiment analysis,\" in Proceedings of the 2022 conference on empirical methods in natural language processing, 2022, pp. 3324-3335." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.127, + 0.923, + 0.172 + ], + "angle": 0, + "content": "[24] R. Fan, T. He, M. Chen, M. Zhang, X. Tu, and M. Dong, \"Dual causes generation assisted model for multimodal aspect-based sentiment classification,\" IEEE Transactions on Neural Networks and Learning Systems, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.172, + 0.923, + 0.219 + ], + "angle": 0, + "content": "[25] J. Wang, Z. Li, J. Yu, L. Yang, and R. Xia, \"Fine-grained multimodal named entity recognition and grounding with a generative framework,\" in Proceedings of the 31st ACM International Conference on Multimedia, 2023, pp. 3934-3943." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.22, + 0.923, + 0.266 + ], + "angle": 0, + "content": "[26] X. Zhang, R. Mao, K. He, and E. Cambria, \"Neurosymbolic sentiment analysis with dynamic word sense disambiguation,\" in Findings of the Association for Computational Linguistics: EMNLP 2023, Singapore, 2023, pp. 8772-8783." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.266, + 0.923, + 0.311 + ], + "angle": 0, + "content": "[27] Q. Lu, X. Sun, Y. Long, Z. Gao, J. Feng, and T. Sun, \"Sentiment analysis: Comprehensive reviews, recent advances, and open challenges,\" IEEE Transactions on Neural Networks and Learning Systems, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.312, + 0.923, + 0.357 + ], + "angle": 0, + "content": "[28] H. Liu, W. Wang, and H. Li, \"Interpretable multimodal misinformation detection with logic reasoning,\" in Findings of the Association for Computational Linguistics: ACL 2023, 2023, pp. 9781-9796." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.359, + 0.923, + 0.406 + ], + "angle": 0, + "content": "[29] R. Mao, K. Du, Y. Ma, L. Zhu, and E. Cambria, \"Discovering the cognition behind language: Financial metaphor analysis with MetaPro,\" in 2023 IEEE International Conference on Data Mining (ICDM). IEEE, 2023, pp. 1211-1216." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.406, + 0.923, + 0.464 + ], + "angle": 0, + "content": "[30] E. Cambria, X. Zhang, R. Mao, M. Chen, and K. Kwok, \"SenticNet 8: Fusing emotion AI and commonsense AI for interpretable, trustworthy, and explainable affective computing,\" in Proceedings of International Conference on Human-Computer Interaction (HCI), Washington DC, USA, 2024, pp. 197-216." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.465, + 0.923, + 0.511 + ], + "angle": 0, + "content": "[31] K. Du, R. Mao, F. Xing, and E. Cambria, \"Explainable stock price movement prediction using contrastive learning,\" in Proceedings of the 33rd ACM International Conference on Information and Knowledge Management (CIKM), Idaho, USA, 2024, pp. 529-537." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.511, + 0.923, + 0.546 + ], + "angle": 0, + "content": "[32] H. Zhang, X. Zhou, Z. Shen, and Y. 
Li, \"Privfr: Privacy-enhanced federated recommendation with shared hash embedding,\" IEEE Transactions on Neural Networks and Learning Systems, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.546, + 0.923, + 0.591 + ], + "angle": 0, + "content": "[33] E. Yang, L. Shen, G. Guo, X. Wang, X. Cao, J. Zhang, and D. Tao, \"Model merging in llms, mllms, and beyond: Methods, theories, applications and opportunities,\" arXiv preprint arXiv:2408.07666, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.592, + 0.923, + 0.649 + ], + "angle": 0, + "content": "[34] L. Xiao, R. Mao, X. Zhang, L. He, and E. Cambria, \"Vanessa: Visual connotation and aesthetic attributes understanding network for multimodal aspect-based sentiment analysis,\" in Findings of the Association for Computational Linguistics: EMNLP 2024, 2024, pp. 11486-11500." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.65, + 0.923, + 0.719 + ], + "angle": 0, + "content": "[35] J. Kruk, J. Lubin, K. Sikka, X. Lin, D. Jurafsky, and A. Divakaran, \"Integrating text and image: Determining multimodal document intent in instagram posts,\" in Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), 2019, pp. 4622-4632." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.719, + 0.923, + 0.767 + ], + "angle": 0, + "content": "[36] H. Liu, W. Wang, and H. Li, \"Towards multi-modal sarcasm detection via hierarchical congruity modeling with knowledge enhancement,\" in Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, 2022, pp. 4995-5006." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.767, + 0.923, + 0.813 + ], + "angle": 0, + "content": "[37] R. Mao and X. 
Li, \"Bridging towers of multi-task learning with a gating mechanism for aspect-based sentiment analysis and sequential metaphor identification,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35, 2021, pp. 13534-13542." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.813, + 0.923, + 0.849 + ], + "angle": 0, + "content": "[38] T. Yue, R. Mao, H. Wang, Z. Hu, and E. Cambria, \"KnowleNet: Knowledge fusion network for multimodal sarcasm detection,\" Information Fusion, vol. 100, p. 101921, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.849, + 0.923, + 0.883 + ], + "angle": 0, + "content": "[39] C. Fan, J. Lin, R. Mao, and E. Cambria, \"Fusing pairwise modalities for emotion recognition in conversations,\" Information Fusion, vol. 106, p. 102306, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.884, + 0.923, + 0.93 + ], + "angle": 0, + "content": "[40] L. Yang, Z. Wang, Z. Li, J.-C. Na, and J. Yu, \"An empirical study of multimodal entity-based sentiment analysis with chatgpt: Improving in-context learning via entity-aware contrastive learning,\" Information Processing & Management, vol. 61, no. 4, p. 103724, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.93, + 0.923, + 0.943 + ], + "angle": 0, + "content": "[41] L. Yang, J. Wang, J.-C. Na, and J. Yu, \"Generating paraphrase sen" + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.056, + 0.923, + 0.943 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.519, + 0.045 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. 
XX, XXXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.923, + 0.043 + ], + "angle": 0, + "content": "17" + }, + { + "type": "ref_text", + "bbox": [ + 0.104, + 0.055, + 0.492, + 0.08 + ], + "angle": 0, + "content": "tences for multimodal entity-category-sentiment triple extraction,\" Knowledge-Based Systems, vol. 278, p. 110823, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.08, + 0.492, + 0.116 + ], + "angle": 0, + "content": "[42] J. Zhou, J. Zhao, J. X. Huang, Q. V. Hu, and L. He, \"Masad: A large-scale dataset for multimodal aspect-based sentiment analysis,\" Neurocomputing, vol. 455, pp. 47-58, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.116, + 0.492, + 0.163 + ], + "angle": 0, + "content": "[43] W. Zhang, X. Li, Y. Deng, L. Bing, and W. Lam, \"A survey on aspect-based sentiment analysis: Tasks, methods, and challenges,\" IEEE Transactions on Knowledge and Data Engineering, vol. 35, no. 11, pp. 11019-11038, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.163, + 0.492, + 0.21 + ], + "angle": 0, + "content": "[44] X. Ju, D. Zhang, R. Xiao, J. Li, S. Li, M. Zhang, and G. Zhou, \"Joint multi-modal aspect-sentiment analysis with auxiliary cross-modal relation detection,\" in Proceedings of the 2021 conference on empirical methods in natural language processing, 2021, pp. 4395-4405." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.21, + 0.492, + 0.257 + ], + "angle": 0, + "content": "[45] J. Mu, F. Nie, W. Wang, J. Xu, J. Zhang, and H. Liu, \"Mocolnet: A momentum contrastive learning network for multimodal aspect-level sentiment analysis,\" IEEE Transactions on Knowledge and Data Engineering, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.257, + 0.492, + 0.315 + ], + "angle": 0, + "content": "[46] F. Zhao, C. Li, Z. Wu, Y. Ouyang, J. Zhang, and X. 
Dai, \"M2df: Multi-grained multi-curriculum denoising framework for multimodal aspect-based sentiment analysis,\" in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, 2023, pp. 9057-9070." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.316, + 0.492, + 0.352 + ], + "angle": 0, + "content": "[47] E. Cambria, R. Mao, M. Chen, Z. Wang, and S.-B. Ho, \"Seven pillars for the future of artificial intelligence,\" IEEE Intelligent Systems, vol. 38, no. 6, pp. 62-69, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.352, + 0.492, + 0.375 + ], + "angle": 0, + "content": "[48] R. Arnheim, Art and visual perception: A psychology of the creative eye. Univ of California Press, 1954." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.376, + 0.492, + 0.412 + ], + "angle": 0, + "content": "[49] V. S. Ramachandran and W. Hirstein, \"The science of art: A neurological theory of aesthetic experience,\" Journal of Consciousness Studies, vol. 6, no. 6-7, pp. 15-51, 1999." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.412, + 0.492, + 0.447 + ], + "angle": 0, + "content": "[50] H. Zeng, Z. Cao, L. Zhang, and A. C. Bovik, \"A unified probabilistic formulation of image aesthetic assessment,\" IEEE Transactions on Image Processing, vol. 29, pp. 1548-1561, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.447, + 0.492, + 0.482 + ], + "angle": 0, + "content": "[51] G. C. Cupchik and J. László, Emerging visions of the aesthetic process: In psychology, semiology, and philosophy. Cambridge University Press, 1992." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.483, + 0.492, + 0.53 + ], + "angle": 0, + "content": "[52] X. Jin, L. Wu, G. Zhao, X. Li, X. Zhang, S. Ge, D. Zou, B. Zhou, and X. Zhou, \"Aesthetic attributes assessment of images,\" in Proceedings of the 27th ACM international conference on multimedia, 2019, pp. 311-319." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.53, + 0.492, + 0.577 + ], + "angle": 0, + "content": "[53] J. Ke, K. Ye, J. Yu, Y. Wu, P. Milanfar, and F. Yang, \"Vila: Learning image aesthetics from user comments with vision-language pretraining,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 10041-10051." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.577, + 0.492, + 0.625 + ], + "angle": 0, + "content": "[54] J. Kruk, C. Ziems, and D. Yang, \"Impressions: Visual semiotics and aesthetic impact understanding,\" in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, 2023, pp. 12273-12291." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.625, + 0.492, + 0.66 + ], + "angle": 0, + "content": "[55] R. Anil, A. M. Dai, O. First, M. Johnson, D. Lepikhin, A. Passos, S. Shakeri, E. Taropa, P. Bailey, Z. Chen et al., \"Palm 2 technical report,\" arXiv preprint arXiv:2305.10403, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.66, + 0.492, + 0.718 + ], + "angle": 0, + "content": "[56] R. Mao, G. Chen, X. Zhang, F. Guerin, and E. Cambria, \"GPTEval: A survey on assessments of ChatGPT and GPT-4,\" in Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024). ELRA and ICCL, 2024, pp. 7844-7866." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.718, + 0.492, + 0.764 + ], + "angle": 0, + "content": "[57] S. Zhao, L. A. Tuan, J. Fu, J. Wen, and W. Luo, \"Exploring clean label backdoor attacks and defense in language models,\" IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.765, + 0.492, + 0.812 + ], + "angle": 0, + "content": "[58] S. Zhao, X. Xu, L. Xiao, J. Wen, and L. A. 
Tuan, \"Clean-label backdoor attack and defense: An examination of language model vulnerability,\" Expert Systems with Applications, vol. 265, p. 125856, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.812, + 0.492, + 0.849 + ], + "angle": 0, + "content": "[59] J. Achiam, S. Adler, S. Agarwal, L. Ahmad, I. Akkaya, F. L. Aleman, D. Almeida, J. Altenschmidt, S. Altman, S. Anadkat et al., \"Gpt-4 technical report,\" arXiv preprint arXiv:2303.08774, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.849, + 0.492, + 0.895 + ], + "angle": 0, + "content": "[60] G. Team, R. Anil, S. Borgeaud, J.-B. Alayrac, J. Yu, R. Soricut, J. Schalkwyk, A. M. Dai, A. Hauth, K. Millican et al., \"Gemini: a family of highly capable multimodal models,\" arXiv preprint arXiv:2312.11805, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.077, + 0.895, + 0.492, + 0.942 + ], + "angle": 0, + "content": "[61] H. Touvron, L. Martin, K. Stone, P. Albert, A. Almahairi, Y. Babaei, N. Bashlykov, S. Batra, P. Bhargava, S. Bhosale et al., \"Llama 2: Open foundation and fine-tuned chat models,\" arXiv preprint arXiv:2307.09288, 2023." + }, + { + "type": "list", + "bbox": [ + 0.076, + 0.055, + 0.492, + 0.942 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.056, + 0.923, + 0.091 + ], + "angle": 0, + "content": "[62] H. Liu, W. Wang, H. Sun, A. Rocha, and H. Li, \"Robust domain misinformation detection via multi-modal feature alignment,\" IEEE Transactions on Information Forensics and Security, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.091, + 0.923, + 0.137 + ], + "angle": 0, + "content": "[63] R. Mao, K. He, C. Ong, Q. Liu, and E. Cambria, “Metapro 2.0: Computational metaphor processing on the effectiveness of anomalous language modeling,” in Findings of the Association for Computational Linguistics ACL 2024, 2024, pp. 9891–9908." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.137, + 0.923, + 0.172 + ], + "angle": 0, + "content": "[64] Z. Tan, D. Li, S. Wang, A. Beigi, B. Jiang, A. Bhattacharjee, M. Karami, J. Li, L. Cheng, and H. Liu, \"Large language models for data annotation: A survey,\" arXiv preprint arXiv:2402.13446, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.172, + 0.923, + 0.208 + ], + "angle": 0, + "content": "[65] R. Mao, G. Chen, X. Li, M. Ge, and E. Cambria, \"A comparative analysis of metaphorical cognition in chatgpt and human minds,\" Cognitive Computation, vol. 17, no. 1, p. 35, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.208, + 0.923, + 0.242 + ], + "angle": 0, + "content": "[66] Y. Jia, X. Wu, H. Li, Q. Zhang, Y. Hu, S. Zhao, and W. Fan, \"Uni-retrieval: A multi-style retrieval framework for stem's education,\" arXiv preprint arXiv:2502.05863, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.242, + 0.923, + 0.29 + ], + "angle": 0, + "content": "[67] J. Wei, X. Wang, D. Schuurmans, M. Bosma, F. Xia, E. Chi, Q. V. Le, D. Zhou et al., \"Chain-of-thought prompting elicits reasoning in large language models,\" Advances in neural information processing systems, vol. 35, pp. 24824-24837, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.29, + 0.923, + 0.336 + ], + "angle": 0, + "content": "[68] K. Cobbe, V. Kosaraju, M. Bavarian, M. Chen, H. Jun, L. Kaiser, M. Plappert, J. Tworek, J. Hilton, R. Nakano et al., \"Training verifiers to solve math word problems,\" arXiv preprint arXiv:2110.14168, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.336, + 0.923, + 0.371 + ], + "angle": 0, + "content": "[69] P. Wang, A. Chan, F. Ilievski, M. Chen, and X. Ren, \"Pinto: Faithful language reasoning using prompt-generated rationales,\" in The Eleventh International Conference on Learning Representations, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.371, + 0.923, + 0.418 + ], + "angle": 0, + "content": "[70] P. Wang, Z. Wang, Z. Li, Y. Gao, B. Yin, and X. Ren, \"Scott: Self-consistent chain-of-thought distillation,\" in Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2023, pp. 5546-5558." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.418, + 0.923, + 0.453 + ], + "angle": 0, + "content": "[71] H. Liu, Z. Teng, L. Cui, C. Zhang, Q. Zhou, and Y. Zhang, \"Logicot: Logical chain-of-thought instruction tuning,\" in The 2023 Conference on Empirical Methods in Natural Language Processing, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.453, + 0.923, + 0.499 + ], + "angle": 0, + "content": "[72] M. Kang, S. Lee, J. Baek, K. Kawaguchi, and S. J. Hwang, \"Knowledge-augmented reasoning distillation for small language models in knowledge-intensive tasks,\" Advances in Neural Information Processing Systems, vol. 36, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.499, + 0.923, + 0.535 + ], + "angle": 0, + "content": "[73] Y. Li, A. Dao, W. Bao, Z. Tan, T. Chen, H. Liu, and Y. Kong, \"Facial affective behavior analysis with instruction tuning,\" in European Conference on Computer Vision. Springer, 2025, pp. 165-186." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.535, + 0.923, + 0.57 + ], + "angle": 0, + "content": "[74] J. Guo, J. Deng, A. Lattas, and S. Zafeiriou, \"Sample and computation redistribution for efficient face detection,\" in International Conference on Learning Representations, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.57, + 0.923, + 0.628 + ], + "angle": 0, + "content": "[75] S. Wegreffer, J. Hessel, S. Swayamdipta, M. Riedl, and Y. 
Choi, \"Reframing human-ai collaboration for generating free-text explanations,\" in Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 2022, pp. 632–658." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.628, + 0.923, + 0.675 + ], + "angle": 0, + "content": "[76] L. Meng, H. Li, B.-C. Chen, S. Lan, Z. Wu, Y.-G. Jiang, and S.-N. Lim, \"Adavit: Adaptive vision transformers for efficient image recognition,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 12309-12318." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.675, + 0.923, + 0.72 + ], + "angle": 0, + "content": "[77] Z. Fu, L. Zhang, H. Xia, and Z. Mao, \"Linguistic-aware patch slimming framework for fine-grained cross-modal alignment,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 26307-26316." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.721, + 0.923, + 0.767 + ], + "angle": 0, + "content": "[78] C. Maddison, A. Mnih, and Y. Teh, \"The concrete distribution: A continuous relaxation of discrete random variables,\" in Proceedings of the international conference on Learning Representations. International Conference on Learning Representations, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.767, + 0.923, + 0.803 + ], + "angle": 0, + "content": "[79] Z. Zong, K. Li, G. Song, Y. Wang, Y. Qiao, B. Leng, and Y. Liu, \"Self-slimmed vision transformer,\" in European Conference on Computer Vision. Springer, 2022, pp. 432-448." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.803, + 0.923, + 0.837 + ], + "angle": 0, + "content": "[80] F. Faghri, D. J. Fleet, J. R. Kiros, and S. Fidler, \"Vse++: Improving visual-semantic embeddings with hard negatives,\" arXiv preprint arXiv:1707.05612, 2017." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.838, + 0.923, + 0.896 + ], + "angle": 0, + "content": "[81] L. Yang, J. Yu, C. Zhang, and J.-C. Na, \"Fine-grained sentiment analysis of political tweets with entity-aware multimodal network,\" in Diversity, Divergence, Dialogue: 16th International Conference, iConference 2021, Beijing, China, March 17–31, 2021, Proceedings, Part I 16. Springer, 2021, pp. 411–420." + }, + { + "type": "ref_text", + "bbox": [ + 0.508, + 0.896, + 0.923, + 0.942 + ], + "angle": 0, + "content": "[82] H. W. Chung, L. Hou, S. Longpre, B. Zoph, Y. Tay, W. Fedus, Y. Li, X. Wang, M. Dehghani, S. Brahma et al., \"Scaling instructionfinetuned language models,\" Journal of Machine Learning Research, vol. 25, no. 70, pp. 1-53, 2024." + }, + { + "type": "list", + "bbox": [ + 0.508, + 0.056, + 0.923, + 0.942 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.075, + 0.033, + 0.518, + 0.045 + ], + "angle": 0, + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.908, + 0.034, + 0.923, + 0.044 + ], + "angle": 0, + "content": "18" + }, + { + "type": "ref_text", + "bbox": [ + 0.075, + 0.055, + 0.492, + 0.079 + ], + "angle": 0, + "content": "[83] I. Loshchilov, \"Decoupled weight decay regularization,\" arXiv preprint arXiv:1711.05101, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.075, + 0.079, + 0.492, + 0.113 + ], + "angle": 0, + "content": "[84] K. He, X. Zhang, S. Ren, and J. Sun, \"Deep residual learning for image recognition,\" in Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 770-778." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.113, + 0.492, + 0.158 + ], + "angle": 0, + "content": "[85] D. Tang, B. Qin, and T. 
Liu, \"Aspect level sentiment classification with deep memory network,\" in Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, 2016, pp. 214-224." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.158, + 0.492, + 0.204 + ], + "angle": 0, + "content": "[86] F. Fan, Y. Feng, and D. Zhao, \"Multi-grained attention network for aspect-level sentiment classification,\" in Proceedings of the 2018 conference on empirical methods in natural language processing, 2018, pp. 3433-3442." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.204, + 0.492, + 0.249 + ], + "angle": 0, + "content": "[87] J. D. M.-W. C. Kenton and L. K. Toutanova, \"Bert: Pre-training of deep bidirectional transformers for language understanding,\" in Proceedings of naacL-HLT, vol. 1. Minneapolis, Minnesota, 2019, p. 2." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.249, + 0.492, + 0.295 + ], + "angle": 0, + "content": "[88] N. Xu, W. Mao, and G. Chen, \"Multi-interactive memory network for aspect based multimodal sentiment analysis,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 33, 2019, pp. 371-378." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.295, + 0.492, + 0.34 + ], + "angle": 0, + "content": "[89] J. Yu, K. Chen, and R. Xia, \"Hierarchical interactive multimodal transformer for aspect-based multimodal sentiment analysis,\" IEEE Transactions on Affective Computing, vol. 14, no. 3, pp. 1966-1978, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.34, + 0.492, + 0.386 + ], + "angle": 0, + "content": "[90] D. Liu, L. Li, X. Tao, J. Cui, and Q. Xie, \"Descriptive prompt paraphrasing for target-oriented multimodal sentiment classification,\" in Findings of the Association for Computational Linguistics: EMNLP 2023, 2023, pp. 4174-4186." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.386, + 0.492, + 0.432 + ], + "angle": 0, + "content": "[91] B. Yang and J. 
Li, \"Visual elements mining as prompts for instruction learning for target-oriented multimodal sentiment classification,\" in Findings of the Association for Computational Linguistics: EMNLP 2023, 2023, pp. 6062-6075." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.432, + 0.492, + 0.498 + ], + "angle": 0, + "content": "[92] J. Camacho-Collados, K. Rezaee, T. Riahi, A. Ushio, D. Loureiro, D. Antypas, J. Boisson, L. E. Anke, F. Liu, and E. Martinez-Camara, \"Tweetnlp: Cutting-edge natural language processing for social media,\" in Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, 2022, pp. 38-49." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.499, + 0.492, + 0.546 + ], + "angle": 0, + "content": "[93] J. Ye, J. Zhou, J. Tian, R. Wang, Q. Zhang, T. Gui, and X.-J. Huang, \"Rethinkingtmsc: An empirical study for target-oriented multimodal sentiment classification,\" in Findings of the Association for Computational Linguistics: EMNLP 2023, 2023, pp. 270-277." + }, + { + "type": "ref_text", + "bbox": [ + 0.076, + 0.546, + 0.492, + 0.569 + ], + "angle": 0, + "content": "[94] M. Ivanova and S. French, The aesthetics of science: beauty, imagination and understanding. Routledge, 2020." + }, + { + "type": "list", + "bbox": [ + 0.075, + 0.055, + 0.492, + 0.569 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.076, + 0.596, + 0.199, + 0.689 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.208, + 0.582, + 0.492, + 0.708 + ], + "angle": 0, + "content": "Luwei Xiao is currently pursuing his Ph.D. degree in the School of Computer Science and Technology at East China Normal University, Shanghai, China, under the supervision of Prof. Liang He. He is presently conducting an academic visit to the College of Computing and Data Science at Nanyang Technological University, Singapore, under the supervision of Prof. 
Erik Cambria, with funding support from the China Scholarship Council (CSC). His research interests encompass multimodal learning, semi-" + }, + { + "type": "text", + "bbox": [ + 0.075, + 0.708, + 0.36, + 0.72 + ], + "angle": 0, + "content": "ment analysis, and image aesthetic assessment." + }, + { + "type": "image", + "bbox": [ + 0.08, + 0.768, + 0.195, + 0.884 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.208, + 0.767, + 0.492, + 0.893 + ], + "angle": 0, + "content": "Rui Mao is a Research Scientist and Lead Investigator at Nanyang Technological University. He obtained his Ph.D. degree in Computing Science from the University of Aberdeen. His research interest lies in NLP, cognitive computing, and their applications in finance and cognitive science. He and his funded company (Ruimao Tech) have developed an end-to-end system (MetaPro) for computational metaphor processing and a neural search engine (wensousou.com) for searching Chinese ancient po" + }, + { + "type": "text", + "bbox": [ + 0.074, + 0.893, + 0.492, + 0.94 + ], + "angle": 0, + "content": "ems with modern language. He served as Area Chair in COLING and EMNLP and Associate Editor in IEEE Transactions on Affective Computing, Expert Systems, Information Fusion and Neurocomputing. Contact him at rui.mao@ntu.edu.sg." + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.064, + 0.627, + 0.172 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.507, + 0.18, + 0.544, + 0.192 + ], + "angle": 0, + "content": "tacks." + }, + { + "type": "text", + "bbox": [ + 0.639, + 0.055, + 0.923, + 0.182 + ], + "angle": 0, + "content": "Shuai Zhao obtained his Ph.D. degree from Jinan University in 2024. He spent one year as a visiting student and six months as a research assistant at the School of Computer Science and Engineering, Nanyang Technological University. 
He is now a Postdoctoral Researcher at the College of Computing and Data Science, Nanyang Technological University. His current research interests include deep learning and natural language processing for code generation, summary generation, text classification and backdoor at" + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.239, + 0.625, + 0.356 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.639, + 0.238, + 0.923, + 0.365 + ], + "angle": 0, + "content": "Qika Lin received his Ph.D. degree at Xi'an Jiaotong University. Currently, he is a Research Fellow at the National University of Singapore. His research interests include natural language processing, knowledge reasoning, and multimodal learning. He has published papers in top-tier journals/conferences, including TKDE, ACL, SIGIR, KDD, ICDE, and IJCAI. He has actively contributed to several journals/conferences as a reviewer or PC member, including TPAMI, IJCV, TKDE, TMC, TNNLS, NeurIPS, ICLR, SIGIR," + }, + { + "type": "text", + "bbox": [ + 0.506, + 0.365, + 0.923, + 0.387 + ], + "angle": 0, + "content": "ACL, and EMNLP. He also served as a Guest Editor of IEEE TCSS and Information Fusion." + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.435, + 0.628, + 0.55 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.639, + 0.433, + 0.923, + 0.514 + ], + "angle": 0, + "content": "Yanhao Jia is a phd student at Nanyang Technological University. He obtained his bechealor degree in Computing Science from Shandong University. He has published over seven conference/journal papers on ECCV/NeurIPS/IEEE Trans on nuclear science and been the reviewer for ACM MM and ECCV." 
+ }, + { + "type": "image", + "bbox": [ + 0.517, + 0.596, + 0.619, + 0.712 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.639, + 0.594, + 0.923, + 0.699 + ], + "angle": 0, + "content": "Liang He received his PhD degree from the Department of Computer Science and Technology, East China Normal University, China. He is now a professor and the Vice Dean of the School of Computer Science and Technology, East China Normal University. His current research interest includes Natural Language Processing, Knowledge Processing, and Human in the Loop for Decision-making." + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.758, + 0.628, + 0.875 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.639, + 0.757, + 0.923, + 0.884 + ], + "angle": 0, + "content": "Erik Cambria is a Professor at Nanyang Technological University, where he also holds the appointment of Provost Chair in Computer Science and Engineering, and Founder of several AI companies, such as SenticNet, offering B2B sentiment analysis services, and finaXai, providing fully explainable financial insights. His research focuses on neurosymbolic AI for interpretable, trustworthy, and explainable affective computing in domains like social media monitoring, financial forecasting, and AI for social" + }, + { + "type": "text", + "bbox": [ + 0.505, + 0.884, + 0.923, + 0.942 + ], + "angle": 0, + "content": "good. He is an IEEE Fellow, Associate Editor of various top-tier AI journals, e.g., Information Fusion and IEEE Transactions on Affective Computing, and is involved in several international conferences as keynote speaker, program chair and committee member. Contact him at cambria@ntu.edu.sg." 
+ } + ] +] \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15848/c2a6d104-48f7-465b-bad3-e87ed3722daf_origin.pdf b/data/2025/2504_15xxx/2504.15848/c2a6d104-48f7-465b-bad3-e87ed3722daf_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..65559a646b30c0fcfcef3846069ca8d43f7ee253 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/c2a6d104-48f7-465b-bad3-e87ed3722daf_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30985af32fd5e26348572d17f58e614d9c670a316a6bc973a3ae6685081bddbc +size 14332032 diff --git a/data/2025/2504_15xxx/2504.15848/full.md b/data/2025/2504_15xxx/2504.15848/full.md new file mode 100644 index 0000000000000000000000000000000000000000..2a9541c03b4bf7415614848a9f6edabb6d959859 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/full.md @@ -0,0 +1,624 @@ +# Exploring Cognitive and Aesthetic Causality for Multimodal Aspect-Based Sentiment Analysis + +Luwei Xiao, Student Member, IEEE, Rui Mao*, Member, IEEE, Shuai Zhao, Qika Lin, Yanhao Jia, Liang He, and Erik Cambria, Fellow, IEEE + +Abstract—Multimodal aspect-based sentiment classification (MASC) is an emerging task due to an increase in user-generated multimodal content on social platforms, aimed at predicting sentiment polarity toward specific aspect targets (i.e., entities or attributes explicitly mentioned in text-image pairs). Despite extensive efforts and significant achievements in existing MASC, substantial gaps remain in understanding fine-grained visual content and the cognitive rationales derived from semantic content and impressions (cognitive interpretations of emotions evoked by image content). 
In this study, we present Chimera: a cognitive and aesthetic sentiment causality understanding framework to derive fine-grained holistic features of aspects and infer the fundamental drivers of sentiment expression from both semantic perspectives and affective-cognitive resonance (the synergistic effect between emotional responses and cognitive interpretations). Specifically, this framework first incorporates visual patch features for patch-word alignment. Meanwhile, it extracts coarse-grained visual features (e.g., overall image representation) and fine-grained visual regions (e.g., aspect-related regions) and translates them into corresponding textual descriptions (e.g., facial, aesthetic). Finally, we leverage the sentimental causes and impressions generated by a large language model (LLM) to enhance the model's awareness of sentimental cues evoked by semantic content and affective-cognitive resonance. Experimental results on standard MASC datasets demonstrate the effectiveness of the proposed model, which also exhibits greater flexibility to MASC compared to LLMs such as GPT-4o. We have publicly released the complete implementation and dataset at https://github.com/Xillv/Chimera + +Index Terms—Multimodal aspect-based sentiment classification, Sentiment causality, Large language models, Affective-cognitive resonance. + +# 1 INTRODUCTION + +MULTIMODAL aspect-based sentiment classification (MASC) is a valuable task for analyzing user-generated multimodal content on social platforms, aiming to predict the sentiment polarity of a specific target/aspect term within a sentence, based on an image-text pair. In an era marked by growing global interconnectedness, social platforms have become essential channels for individuals to express opinions and share experiences [1]-[3]. These platforms support multimodal content, blending text and visual media, which better reflects how sentiment is conveyed [4]. 
Consequently, analyzing fine-grained sentiment expression in multimodal scenarios not only improves the depth of sentiment classification but also aligns with the natural manner in which users express opinions and emotions, ultimately supporting more accurate sentiment analysis for applications in finance [5], [6], social research [7], [8], and human-computer interaction [9], [10]. Current methodologies for MASC can be broadly divided into two principal categories: visual-text fusion-based approaches and translation-based approaches. Visual-text fusion-based methods address MASC by directly integrating visual content with + +textual features through various attention-based mechanisms [11]-[16]. + +Yu et al. [11] were the first to propose the utilization of ResNet for image feature extraction in conjunction with BERT for language sequence modeling, subsequently feeding these components into a BERT encoder to facilitate the interactive modeling of cross-modal representations. Ling et al. [14] introduced a vision-language pre-training framework that leverages Faster R-CNN for extracting object-level visual features and BART for generating textual features, with the model pre-trained using three task-specific strategies targeting the language, vision, respectively. Yu et al. [13] presented a novel multi-task learning framework Image-Target Matching Network (ITM), which concurrently performs coarse-to-fine-grained visual-textual relevance detection and visual object-target alignment through cross-modal Transformers. Translation-based approaches focus on mapping visual content into the language space as auxiliary textual representations, leveraging this supplementary information, or integrating it with visual features to enhance MASC [17]-[22]. Khan et al. [17] translated the image into a corresponding caption, which is then jointly input with the sentence into BERT to predict the sentiment polarity associated with specific targets. Yang et al. 
[23] exploit a face-sensitive, translation-based approach that translates facial expressions in images into textual sentiment cues, which are then selectively aligned and fused with the targets for enhanced sentiment analysis. Xiao et al. [19] proposed the CoolNet framework, which generates visual captions for images and extracts syntactic and semantic features from the textual + +modality, subsequently fusing these with visual features through a cross-modal Transformer. + +Despite substantial efforts and promising advancements, current solutions continue to encounter the following challenges. First, excessive duplicative visual patches can overshadow critical visual clues relevant to the specific target, leading to considerable misalignment during patch-token interactions. These small visual patches often lack semantic coherence compared to complete visual regions, particularly when aligning targets with their corresponding objects in an image, potentially leading to ambiguous semantic representations. Second, limited studies have focused on the underlying rationale behind sentiment cues, particularly from the perspectives of semantic content and affective-cognitive resonance. Owing to the multimodal nature of Twitter content, which spans diverse facets of daily life, inferring the sentiment associated with specific targets necessitates not only an understanding of the surface-level information in text and images (e.g., facial expressions) but also an in-depth comprehension of the contextual background of particular events and the impressions evoked by the image's content and aesthetic attributes. + +To address the aforementioned challenges, this paper proposes Chimera: a cognitive and aesthetic sentiment causality understanding framework. This framework aims to incorporate and align fine-grained features of specific targets and reasons about semantic and impression rationales. 
However, two critical issues must be resolved to achieve these objectives: 1) How can specific targets in a sentence be aligned with their corresponding object-level fine-grained features in an image? 2) How can the model be enabled to reason about the emotional causal reasons within the semantic content of image-text pairs and the affective resonance evoked by image aesthetic attributes? For the first question, we propose to make the cross-modal alignment of the target via the visual patch-level by linguistic-aware patch-token alignment and object-level by accurately translating the object feature into language space. Regarding the second issue, while a recent study [24] developed a reasoning dataset for MASC, this dataset primarily explains the emotional causes within textual content and lacks reasoning capabilities for visual content and the affective resonance evoked by images, limiting its suitability for the multimodal nature of this task. Consequently, we employ a large language model (LLM), GPT-4o, to generate the semantic rationale and impression rationale to understand the causal foundations of emotions. + +Specifically, our proposed framework first extracts visual patch-level and textual features, feeding them into a tailored linguistic-aware patch-token alignment (LPA) module to achieve patch-token alignment. Concurrently, a translation module (TM) translates the holistic image or object-level content into aesthetic captions or facial descriptions, leveraging multimodal named entity annotations from the work of Wang et al. [25]. The TM-generated text, along with the sentence and aspect, is then input into a generative module for multi-task learning to produce sentiment polarity, semantic rationale (SR), and impression rationale (IR). 
By bootstrapping the model's perception of underlying rationale through an in-depth understanding of textual and + +visual content as well as the affective resonance evoked by images, it enhances the performance of sentiment classification. + +In a nutshell, the primary contributions are as follows: + +- We propose a novel framework for MASC that aligns specific targets with their corresponding visual objects at the patch-token and object levels while equipping the model with causal rationale reasoning ability for semantic rationale (SR), and impression rationale (IR). +- We approach this task by enabling the model to grasp the semantic content of image-text pairs and the affective resonance evoked by images. To our knowledge, we are the first to collect semantic and impression rationale data for the MASC task, based on existing MASC datasets, extending its content to incorporate semantic and impression rationale, offering a valuable resource for advancing MASC research. +- Experiments on three widely-used Twitter benchmarks demonstrate that our proposed method outperforms previous approaches, achieving state-of-the-art performance. Further evaluations validate the effectiveness of our approach for MASC tasks. + +The remainder of this paper is organized as follows: Section 2 provides an overview of related research on multimodal aspect-based sentiment classification, image aesthetic assessment, and multimodal learning. Section 3 details the proposed framework, including linguistics-aware patch-token alignment, the translation-based module, causal rationale dataset construction, and LLM-based annotation generation. Main experimental results are presented in Section 4, and the in-depth analysis is shown in Section 5, followed by conclusions in Section 6. + +# 2 RELATED WORK + +This section reviews key methods in multimodal aspect-based sentiment analysis and image aesthetic assessment. 
Additionally, as our novel rationale dataset is constructed using an LLM, we introduce LLMs for data annotation. + +# 2.1 Multimodal Aspect-based Sentiment Analysis + +Sentiment analysis is a well-established research area focused on understanding and identifying human emotions and opinions across various contexts [26]–[31]. The exponential growth of user-generated multimodal content (e.g., image-text pairs, video clips) on social platforms [32]–[35] has drawn substantial attention to Multimodal Aspect-based Sentiment Analysis (MABSA) [36]–[40]. The MABSA task consists of two sub-tasks: Multimodal Aspect Term Extraction (MATE) and our focused MASC task. MATE [41] is essentially a named entity recognition task aimed at identifying all relevant specific targets within the textual content of an image-text pair. MASC [42], [43] is a text classification task in which specific targets are provided, requiring the identification of their sentiment polarity (positive, neutral, or negative) based on the given image-text pair. A series of recent studies have successfully unified these two subtasks into a single framework, effectively + +![](images/c7f16f703165deced537a0b922aa2f80c5c899a274c670eeeceb35f3ea956d98.jpg) +Fig. 1. The overall framework of the proposed Chimera. Chimera consists of four parts: Translation Module, Rationale Dataset Construction, Linguistic-aware Semantic Alignment, and Rationale-Aware Learning. + +streamlining the MABSA process [14], [15], [22], [44]–[47]. Among these studies, Yu et al. [12] proposed the Entity-Sensitive Attention and Fusion Network (ESAFN), which employs entity-oriented attention combined with a visual gate mechanism to model entity-sensitive inter-dynamics for MASC. Ju et al. [44] were the first to integrate MATE and MASC into an end-to-end task, developing a joint learning framework with cross-modal relation detection. Kruk et al. 
[35] proposed a multimodal framework for Instagram intent detection, integrating three taxonomies and the MDID dataset. It demonstrates that text-image fusion enhances accuracy by $9.6\%$ under semiotic divergence, emphasizing the necessity of multimodal models for capturing the non-intersective "meaning multiplication" inherent in social media. Yang et al. [15] improved cross-modal alignment modeling through a Transformer-based multi-task learning framework, incorporating text-guided cross-modal interactions and using adjective-noun pairs as supervision for a visual auxiliary task. + +Zhou et al. [16] developed an aspect-oriented multimodal fusion approach that constructs an informative dependency graph to minimize additional visual and textual noise in cross-modal interactions by selectively processing aspect-relevant textual and image features. Huang et al. [20] put forward to mapping images into scene graphs, using triplet semantic relationships among entities along with image captions to construct a relatedness matrix for achieving cross-modal alignment in MASC. More recently, Xiao et al. [22] introduced the Atlantis, a trident-shaped architecture that incorporates aesthetic attributes to enhance the emotional resonance of visual content. Fan et al. [24] devised a Flant5-based multi-task learning architecture to enhance the + +model's reasoning capabilities for inferring underlying and direct causes of sentiment expressions. Additionally, they constructed a practical causal dataset for MASC. + +Our proposed method aims to achieve cross-modal alignment at the patch and object levels while equipping the model with reasoning capabilities to discern the semantic and impression rationale underlying sentiment expressions. + +# 2.2 Image Aesthetic Assessment + +Image aesthetics play a fundamental role in shaping viewers' emotional responses and overall aesthetic experience through complex psychological and cognitive processes [48]. 
Image aesthetics pertain to the subjective evaluation and appreciation of its beauty [49]. Image Aesthetic Assessment seeks to systematically appraise this aesthetic quality by analyzing the visual appeal of images [50]. Empirical psychological research corroborates that images can trigger a wide range of emotions, which are influenced by their aesthetic attributes and semantic content [51]. Previous research concentrated on aesthetic image captioning and analysis through the aggregation of commentary on aesthetic attributes [52]. These studies address the concepts of style, layout, and aesthetics from the viewpoints of beauty and visual attractiveness. Recent scholarly efforts have focused on encouraging vision-language models to generate visual connotations and captions related to various aesthetic attributes (e.g., color, harmony, lighting, composition) [53]. More recently, Kruk et al. [54] introduced a connotation-rich dataset, Impressions, designed to explore the emotions, thoughts, and beliefs that images evoke, along with the aesthetic elements that elicit these responses. The introduction of this dataset marks a significant advance in the study of + +how visual stimuli can influence complex perceptual and emotional outcomes. + +In this study, we utilize aesthetic attributes to capture sentiment cues within visual content at both object and holistic levels. Inspired by Impressions [54], we further prompt the LLM to generate impression rationales for MASC, enabling analysis of the underlying affective resonance evoked by images. + +# 2.3 LLMs-Based Rationale Generation + +Recently, LLMs have achieved significant success across various downstream tasks [55]–[58]. LLMs such as GPT-4o [59], Gemini [60], and LLaMA-2 [61] hold significant potential to usher data annotation into a new era, functioning not merely as auxiliary tools but as vital enhancers of its effectiveness and quality [62], [63]. 
LLMs can automatically annotate samples, ensure consistency across large data volumes, and adapt to specific domains via fine-tuning, thereby establishing a new standard in deep learning [64]–[66]. The rationale represents the detailed cognitive process an individual typically follows when solving a problem, providing useful supplementary information for the final answer [67]. Early studies [68] typically relied on human experts to annotate rationale in datasets, significantly limiting availability and scalability. A bunch of diverse methodologies have been developed to produce high-quality and fine-grained rationale. Wang et al. [69] proposed to elucidate each choice in a sample by generating choice-specific rationales via LLMs. Wang et al. [70] enhanced the credibility of generated rationales by incorporating gold-standard answers and using contrastive decoding algorithms. Liu et al. [71] laid much emphasis on curating high-quality prompts to obtain fine-grained rationales from GPT-4o and build a logical chain-of-thought instruction-tuning dataset. More recently, Kang et al. [72] developed a sophisticated neural reranking mechanism to dynamically retrieve highly relevant supplementary documents for generating high-quality rationales in knowledge-intensive reasoning tasks. + +In this paper, we build upon the work of Wang et al. [70] by fully utilizing the dataset's gold-standard annotations to generate semantic and impression rationales through meticulously designed prompts. This approach ensures high-quality rationale generation while avoiding additional costs from trial-and-error OpenAI API usage fees. + +# 3 METHODOLOGY + +This section presents our proposed framework for MASC, beginning with the task formalization, followed by the rationale dataset construction process, and concluding with the proposed method, comprising linguistic-aware semantic alignment, a translation module, rationale dataset construction and a rationale-aware learning framework. 
+ +# 3.1 Task Definition + +Given a multimodal dataset $M$ , each sample $X_{i}$ consists of an image $V_{i}$ paired with a sentence $S_{i}$ containing one or more specific targets $T_{i}$ . The goal of MASC is to predict the sentiment polarity $Y_{i} \in \{\text{Positive}, \text{Negative}, \text{Neutral}\}$ for a specific target $T_{i}$ . Moreover, our framework infers + +both semantic rationale $SR_{i}$ and impression rationale $IR_{i}$ , explaining the sentiment prediction $Y_{i}$ for a specific target $T_{i}$ , based on multimodal semantic meaning and the affective resonance evoked by the image. In this study, the model outputs $SR_{i}, IR_{i}, Y_{i}$ for an input sample $X_{i} = (S_{i}, V_{i}, T_{i})$ , where $SR_{i}$ and $IR_{i}$ offer supplementary sentimental cues for sentiment prediction $T_{i}$ . + +# 3.2 Method Overview + +As shown in Figure 1, our proposed framework comprises four technical components, namely a Translation Module, Rationale Dataset Construction, Linguistic-aware Semantic Alignment, and Rationale-Aware Learning. The Translation Module converts visual content, both holistic and object-level, into language captions. For entire images, it generates emotion-laden aesthetic captions using our fine-tuned BLIP. For object-level content, it maps visuals to facial descriptions or aesthetic captions with rich emotional cues via EmoLA or our fine-tuned BLIP. The construction of the rationale dataset involves generating semantic and impression rationales. We curate prompts tailored to each rationale category and input them, along with the samples, into GPT-4o to collect the desired rationales. The Linguistic-aware Semantic Alignment module segments the input image into patches, dynamically selects and refines relevant visual patches, and achieves patch-token alignment guided by linguistic features from the input sentence. 
Lastly, we propose a Rationale-Aware Learning framework built up on a generative model that simultaneously learns sentiment classification, semantic rationale generation, and impression rationale generation from diverse textual inputs, such as sentences, aesthetic captions, and facial descriptions. + +# 3.3 Translation Module + +This module translates visual content into overall aesthetic captions, object-level facial descriptions, or object-level aesthetic captions in textual form, embedding rich sentimental cues to facilitate object-level sentiment alignment. Specifically, we leverage object annotations from the Fine-Grained Multimodal Named Entity Recognition (MNER) task [25], which annotates specific targets in the sentence and their corresponding objects in the image. The MNER dataset is derived from the same Twitter dataset as the MASC datasets, incorporating the original image-text pairs from MASC. We further pre-process the MNER dataset and transfer its object annotations to the MASC dataset. To generate aesthetic captions rich in sentimental cues, we fine-tune a BLIP model using the recent aesthetic-specific dataset, Impression [54]. For facial description, we deploy the LLM-based EmoLA [73] to interpret fine-grained human mental states from images. + +To tackle the challenge of potential one-to-many annotation scenarios, wherein multiple visual objects correspond to a specific target in the sentence, we calculate the similarity between the entire image and all object annotations, retaining only the object with the highest similarity score for each specific target. Subsequently, we generate various textual auxiliary sentences, based on object annotations. 
Firstly, in cases where the object corresponding to a specific target is absent from the image, a fine-tuned BLIP model is applied to + +generate an overall aesthetic caption $A^{c} = \left(a_{1}^{c}, a_{2}^{c}, \ldots, a_{N_{c}}^{c}\right)$ for the entire image: + +$$ +A ^ {c} = B L I P _ {\text {f i n e}} (V), \tag {1} +$$ + +where $BLIP_{fine}(\cdot)$ is the fine-tuned BLIP over Impression dataset. If the object corresponding to a specific target is present in the image, we develop a Human-Object Differentiation (HOD) module based on the Sample and Computation Redistribution for Efficient Face Detection (SCRFD) [74] framework. This module determines the presence of a face within the annotated object-level visual content and assigns a facial binary label: + +$$ +Y _ {i} ^ {o _ {j}} = H O D \left(V _ {i} ^ {o _ {j}}\right), \tag {2} +$$ + +where $Y_{i}^{o_{j}} \in [1,0]$ indicates whether the object-level visual content contains a face (0 for no face, 1 for face detected), and $V_{i}^{o_{j}}$ denotes the $j$ -th object-level visual content in the $i$ -th image. Subsequently, we generate facial descriptions or aesthetic captions for object-level visual content based on the facial binary label: + +$$ +A ^ {o} = \left\{ \begin{array}{l l} E m o L A \left(V _ {i} ^ {o _ {j}}\right), & \text {i f} Y _ {i} ^ {o _ {j}} = 1, \\ B L I P _ {\text {f i n e}} \left(V _ {i} ^ {o _ {j}}\right), & \text {o t h e r w i s e}, \end{array} \right. \tag {3} +$$ + +where $A^o = (a_1^o, a_2^o, \ldots, a_{N_o}^o)$ is the generated auxiliary sentence (facial description or aesthetic caption) for the object-level visual content. + +# 3.4 Rationale Dataset Construction + +The current MASC benchmark includes only specific target (aspect) labels within the image-text pair sentences and their corresponding sentiment polarities. Recently, Fan et al. 
[24] introduced a dataset for MASC with cause analysis, focusing exclusively on textual semantics rather than integrating both visual and textual cues. Moreover, they overlook the affective resonance evoked by image aesthetic attributes, eliminating a crucial layer of emotional cues and resulting in an incomplete sentiment representation. This omission hinders the holistic integration of textual and visual modalities, leading to suboptimal sentiment modeling. Therefore, we employ GPT-4o to generate semantic and impression rationales, with the detailed generation process outlined in Algorithm 1. + +Algorithm 1 Rationale Dataset Construction +Input: All samples $(V, S, T, Y)$ in MASC dataset $M$ +Output: Rationale dataset $R$ which contains Semantic Rationale (SR) and Impression Rationale (IR) +1: Design & refine prompt pool for SR (SRP) and IR (IRP) +2: for each sample $(V_i, S_i, T_i, Y_i)$ in $M$ do +3: //Randomly select a prompt from SRP for SR +4: $SR_{prompt} \gets PromptPoolforSR(V_i, S_i, T_i, Y_i)$ +5: //Randomly select a prompt from IRP for IR +6: $IR_{prompt} \gets PromptPoolforIR(V_i, S_i, T_i, Y_i)$ +7: Produce SR and IR via GPT-4o +8: $SR_i \gets GPT-4o(V_i, S_i, T_i, Y_i, SR_{prompt})$ +9: $IR_i \gets GPT-4o(V_i, S_i, T_i, Y_i, IR_{prompt})$ +10: Add $(V_i, S_i, T_i, Y_i, SR_i, IR_i)$ to $R$ +11: end for + +TABLE1 Example prompts for semantic rationale generation. + +
TypePrompts
System PromptYou are an AI assistant specializing in multimodal understanding and sentiment analysis, particularly in scenarios involving the integration of image and text modalities.
Semantic Rationale Generation PromptYou will be provided with an image-text pair. Your task is to analyze the sentiment towards the specified entity {aspect} and explain why the sentiment polarity {label} is appropriate. +Your explanation should consider both the semantic meaning of the text and the visual representation of the image, focusing on explicit content and the emotional or contextual cues conveyed by their combination. +Start your response with: "Based on the image-text pair, the sentiment towards {aspect} is {label} because...". Provide a concise, focused explanation highlighting the single most compelling reason for this sentiment classification.
+ +To comprehensively capture the emotional rationale underlying the identified sentiment polarity from a semantic perspective of both image and text, we employ GPT-4o (gpt-4o-2024-05-13) via the OpenAI $\mathrm{API}^1$ to generate SR. Meanwhile, to enable the model to effectively capture implicit emotional cues arising from the affective resonance of aesthetic attributes, we employ GPT-4o to generate the IR. + +To enhance the diversity of generated semantic and impression rationales (SR and IR), we designed and refined a series of templates to construct separate prompt pools for SR and IR, from which a prompt is randomly selected as instructions to guide GPT-4o in generating the corresponding rationale. In this study, we adopt the approach outlined by Sarah et al. [75] and Wang et al. [70], leveraging tailored prompts conditioned on the dataset's gold-standard annotations to generate SR and IR using GPT-4o. The example prompts for generating SR and IR are presented in Tables 1 and 2, respectively. + +# 3.5 Linguistic-aware Semantic Alignment(LSA) + +We first introduce dynamic patch selection in Sec. 3.5.1. Then, we introduce the semantic patch calibration in Sec. 3.5.2. and patch-token alignment in Sec. 3.5.3. The overall process of LSA is shown in the persucode 2. + +# 3.5.1 Dynamic Patch Selection(DPS) + +Dynamic Patch Selection (DPS) is considered a discriminative task that assigns significance scores to visual patches and selects valuable patches based on high scores. For the image in an image-text pair, we opt for vision Transformers as the visual encoder. The image $V$ is divided into $N_v$ non-overlapping patches by spatial distribution. + +These patches are then input as a visual token sequence into the vision Transformer to obtain a set of visual + +TABLE2 Example prompts for impression rationale generation. + +
TypePrompts
System PromptYou are an AI assistant specializing in multimodal emotion and aesthetic understanding, especially in analyzing the emotional responses elicited by visual content.
Impression Rationale Generation PromptYou will be given an image-text pair. Your task is to analyze the specified entity {aspect} and its associated sentiment label {label} based entirely on the image's aesthetic attributes and the emotional resonance it conveys.Focus exclusively on the overall impression and visual connotations conveyed by the image, emphasizing why the assigned sentiment {label} aligns with the general mood or perception evoked by the entity. Avoid mentioning specific details; instead, high-light the prevailing emotional or aesthetic impression.
+ +patch features $V = (v_{cls}, v_1, v_2, \ldots, v_{N_v}) \in \mathbb{R}^{(N_v + 1) \times d}$ . For sentence $S$ , a pre-trained Transformer serves as the textual encoder. The sentence is tokenized into $N_s$ tokens and processed by the encoder to extract linguistic features $S = (s_1, s_2, \ldots, s_{N_s}) \in \mathbb{R}^{N_s \times d}$ . Subsequently, we incorporate spatial information from images into visual patch features and use an MLP-based score-sensitive prediction mechanism to learn significant scores: + +$$ +p _ {i} ^ {s} = \operatorname {S i g m o i d} \left(\mathbf {M L P} \left(\boldsymbol {v} _ {i}\right)\right), i \in \{1, 2, \dots , N _ {v} \}, \tag {4} +$$ + +where $p_i^s \in [0,1]$ represents the importance score assigned to each visual patch. Moreover, achieving refined cross-modal alignment requires more than depending solely on a scoring mechanism to identify valuable visual patches without linguistic supervision [76], [77]. Consequently, we introduce linguistic context by calculating attentive scores between visual patches and the input sentence. First, we derive linguistic-aware scores $p_i^l$ through cross-attention between visual patches and linguistic features. Then, we enhance key visual content by computing self-attention within patches, producing image-prominent scores $p_i^e$ : + +$$ +p _ {i} ^ {l} = \operatorname {N o r m} \left(\boldsymbol {v} _ {i} \cdot S / d\right), p _ {i} ^ {e} = \operatorname {N o r m} \left(\boldsymbol {v} _ {i} \cdot V / d\right), \tag {5} +$$ + +where $\text{Norm}(\cdot)$ denotes the normalization of scores to a range from 0 to 1. $S$ and $V$ represent the global embeddings for linguistic features and visual patches, respectively. These scores are integrated to derive the final value score: + +$$ +p _ {i} ^ {f} = (1 - \beta) p _ {i} ^ {s} + \frac {\beta}{2} \left(p _ {i} ^ {l} + p _ {i} ^ {e}\right), \tag {6} +$$ + +where $\beta$ refers to the weight parameter. 
After obtaining the value score $p^f = (p_1^f, p_2^f, p_3^f, \ldots, p_{N_v}^f) \in \mathbb{R}^{N_v}$ , we convert it into a binary decision matrix $\{0, 1\}^{N_v}$ to determine patch selection. This matrix is constructed using the Gumbel-Softmax technique [78], ensuring a smooth and differentiable sampling process. The Gumbel-Softmax matrix + +Algorithm 2 Linguistic-aware Semantic Alignment (LSA) +1: procedure DYNAMIC PATCH SELECTION(V, S) +2: Extract visual patches $V \leftarrow \mathrm{ViT}(V)$ , text tokens $S \leftarrow$ TextEnc(S) +3: Compute significance scores: $p_i^s \leftarrow \mathrm{MLP}(v_i)$ , $p_i^l \leftarrow \mathrm{Norm}(v_i S^\top)$ , $p_i^e \leftarrow \mathrm{Norm}(v_i V^\top)$ +4: Fuse scores: $p_i^f \leftarrow (1 - \beta)p_i^s + \frac{\beta}{2}(p_i^l + p_i^e)$ +5: Apply Gumbel-Softmax sampling to obtain binary mask $D \in \{0, 1\}^{N_v}$ +6: Return selected patches $V^p \leftarrow \{v_i | D_i = 1\}$ +7: end procedure +8: procedure SEMANTIC PATCH CALIBRATION( $V^p$ ) +9: Aggregate key patches: $\tilde{V}^p \leftarrow \mathrm{Softmax}(\mathrm{MLP}(V^p)) \cdot V^p \quad \triangleright$ Adaptive weighting +10: Fuse redundant patches: $\tilde{v}^r \leftarrow \sum \tilde{p}_i v_i \quad \triangleright$ Weighted sum via $p^f$ +11: Return $\tilde{V}^p \leftarrow [v_{cls}; \tilde{V}^p; \tilde{v}^r]$ +12: end procedure +13: procedure PATCH-TOKEN ALIGNMENT( $\tilde{V}^p, S$ ) +14: Compute cosine similarity matrix $A \in \mathbb{R}^{(N_f + 2) \times N_s}$ +15: Calculate alignment score $K(V, S) \leftarrow \frac{1}{2} (\text{mean}(\text{max}_j A_{ij}) + \text{mean}(\text{max}_i A_{ij}))$ +16: Optimize with $\mathcal{L}_{\text{align}} \leftarrow \text{Bi-directional Triplet Loss}(K(V, S), K(V, \hat{S}), K(\hat{V}, S))$ +17: end procedure + +is defined as: + +$$ +\boldsymbol {M} _ {i, l} = \frac {\exp \left(\log \left(\boldsymbol {m} _ {i , l} + G _ {i , l}\right) / \tau\right)}{\sum_ {j = 1} ^ {L} \exp \left(\log \left(\boldsymbol {m} _ {i , j} + G _ {i , 
j}\right) / \tau\right)}, \tag {7} +$$ + +where $M \in \mathbb{R}^{N_v \times L}$ , $L$ indicates the total number of categories. In this scenario, $L$ is set to 2 for the binary decision $(\pmb{m}_{i,1} = p_i^f, \pmb{m}_{i,2} = 1 - p_i^f)$ . $G_i = -\log (-\log (U_i))$ represents the Gumbel distribution, $U_i$ refers to the uniform distribution and $\tau$ is the temperature parameter. + +Next, we obtain the differentiable decision matrix $D$ by applying the arg-max on $M$ : + +$$ +\boldsymbol {D} = \operatorname {S a m p l i n g} (\boldsymbol {M}) _ {*}, 1 \in \{0, 1 \} ^ {N _ {v}}, \tag {8} +$$ + +where $D$ indicates patch selection outcomes: "1" for important patches and "0" for redundant ones. In the training stage, gradients are backpropagated through the differentiable decision matrix, enabling the dynamic selection of valuable visual patches via the score-sensitive prediction mechanism. + +# 3.5.2 Semantic Patch Calibration(SPC) + +This section aims to further refine the semantic representation of the selected valuable visual patches. After dynamically selecting important visual patches guided by linguistic supervision, we designate them as $V^{p} = \left(v_{1}^{p}, v_{2}^{p}, \ldots, v_{N_{p}}^{p}\right) \in \mathbb{R}^{N_{p} \times d}$ . $N_{p}$ is the number of selected valuable visual patches. We employ an aggregation network [79] to model multiple aggregation weights and combine the selected $N_{p}$ visual patches to generate $N_{f}$ informative visual features: + +$$ +\tilde {\boldsymbol {v}} _ {j} ^ {p} = \sum_ {i = 1} ^ {N _ {p}} (\boldsymbol {W}) _ {i j} \cdot \boldsymbol {v} _ {i} ^ {p}, \quad j = [ 1, \dots , N _ {f} ], \tag {9} +$$ + +$$ +\boldsymbol {W} = \operatorname {s o f t m a x} \left(\mathbf {M L P} \left(\boldsymbol {V} ^ {p}\right)\right), \tag {10} +$$ + +where $(\mathbf{W})$ denotes the normalized weight matrix and $\sum_{i=1}^{N_s} (\mathbf{W})_{ij} = 1$ . $N_f$ is the number of aggregated patches $(N_f < N_p)$ . 
The aggregation network adaptively combines visually similar patches and is differentiable for end-to-end training. While redundant visual patches can be discarded, they may contain supplementary semantic features for refined cross-modal alignment. Therefore, we fuse them into a single patch: + +$$ +\tilde {\boldsymbol {v}} ^ {r} = \sum_ {i \in \mathcal {N}} \tilde {p} _ {i} \cdot \boldsymbol {v} _ {i}, \quad \tilde {p} _ {i} = \frac {\exp \left(p _ {i} ^ {f}\right) \boldsymbol {D} _ {i}}{\sum_ {i = 1} ^ {N} \exp \left(p _ {i} ^ {f}\right) \boldsymbol {D} _ {i}}, \tag {11} +$$ + +where $\mathcal{N}$ represents the set for redundant visual patches. $\tilde{p}_i$ denotes the normalized score of the value score $p_i^f$ . Finally, this component models the calibrated refined visual patches, denoted as $\tilde{V}^p = (v_{cls},\tilde{v}_1^p,\tilde{v}_2^p,\dots ,\tilde{v}_{N_f}^p,\tilde{v}^r)\in \mathbb{R}^{(N_f + 2)\times d}$ . + +# 3.5.3 Patch-token Alignment(PTA) + +This module aims to achieve the fine-grained patch-token level alignment. Specifically, we first utilize the refined visual patches $\tilde{V}^p$ and linguistic features $S$ to compute tokenwise similarities, producing a patch-token similarity matrix $A\in \mathbb{R}^{(N_f + 2)\times N_s}$ . $(A)_{ij} = \frac{(\tilde{v}_i)^T s_j}{\|\tilde{v}_i\| \|s_j\|}$ denotes the patch-token level alignment score between the $i$ -th visual patch and the $j$ -th word. Subsequently, maximum-correspondence interaction is introduced to aggregate cross-modal alignment. 
For each visual patch (or token), we identify the most aligned textual token (or patch) and calculate the average alignment score $K(V,S)$ , representing the overall alignment between the image $V$ and the sentence $S$ : + +$$ +K (V, S) = \frac {1}{N _ {f} + 2} \sum_ {i = 1} ^ {N _ {f} + 2} \max _ {j} (\boldsymbol {A}) _ {i j} + \frac {1}{N _ {s}} \sum_ {j = 1} ^ {N _ {s}} \max _ {i} (\boldsymbol {A}) _ {i j} \tag {12} +$$ + +Following a previous method [80], the bi-direction triplet loss with hard negative mining is exploited: + +$$ +\begin{array}{l} \mathcal {L} _ {\text {a l i g n}} = \sum_ {(V, S)} [ \gamma - K (V, S) + K (V, \hat {S}) ] _ {+} \tag {13} \\ + [ \gamma - K (V, S) + K (\hat {V}, S) ] _ {+}, \\ \end{array} +$$ + +where $\gamma$ is the trade-off parameter. $[x]_{+} = \max (x,0)$ and $(V,S)$ refers to a positive image-text pair in the mini-batch. Moreover, $\hat{S} = \operatorname{argmax}_{j\neq S}K(V,j)$ and $\hat{V} = \operatorname{argmax}_{i\neq V}K(i,V)$ indicate the hardest negative sentence and visual examples within a mini-batch, respectively. + +# 3.6 Rationale-aware Learning + +To endow the model with the ability to perform semantic causality and impression reasoning, we propose a rationale-aware learning framework designed to fine-tune a sequence-to-sequence (seq2seq) model. This seq2seq model is proposed to achieve three task objectives for each specific target within the image-text pair: sentiment classification (SC), semantic rationale generation (SRG), and impression rationale generation (IRG). These tasks are differentiated by the use of distinct input configurations and input content. For SC, the decoder outputs only the predicted sentiment + +polarity. In SRG and IRG, the decoder produces the corresponding rationale and the sentiment prediction. 
+ +Specifically, our input comprises the textual sentence $S = (s_{1}, s_{2}, \ldots, s_{N_{s}})$ , the overall aesthetic caption of the image $A^{c} = (a_{1}^{c}, a_{2}^{c}, \ldots, a_{N_{c}}^{c})$ , the object-level description $A^{o} = (a_{1}^{o}, a_{2}^{o}, \ldots, a_{N_{o}}^{o})$ , which pertains to either facial or aesthetic attributes and the specific target $T$ . The input format is determined by the presence of the specific target within the visual content. For example, if the specific target is identified in the image, based on the annotations provided by Wang et al. [25], the input for SC, SRG, and IRG is defined as follows: + +$$ +H ^ {\mathrm {s c}} = \operatorname {e n c o d e r} \left(t _ {\langle \mathrm {s c} \rangle}, A ^ {c}, S, T\right), \tag {14} +$$ + +$$ +H ^ {\mathrm {s r g}} = \operatorname {e n c o d e r} \left(t _ {\langle \mathrm {s r g} \rangle}, A ^ {c}, S, T\right), \tag {15} +$$ + +$$ +H ^ {\text {i r g}} = \operatorname {e n c o d e r} \left(t _ {\langle \mathrm {i r g} \rangle}, A ^ {c}, S, T\right), \tag {16} +$$ + +where encoder $(\cdot)$ is the Transformer encoder of the seq2seq model. The tokens $t_{\langle \mathrm{sc}\rangle}, t_{\langle \mathrm{src}\rangle},$ and $t_{\langle \mathrm{irg}\rangle}$ are specialized tokens designed to represent distinct tasks. Although the specific aspects are not present in the image, this does not imply that sentimental cues from the image have no impact on predicting the sentiment polarity. On the contrary, incorporating sentiment cues from the holistic image can provide valuable insights into the influence of image aesthetic attributes on the sentiment prediction for the specific aspect. 
For samples where specific targets are present in the visual content, the input format is structured as follows: + +$$ +H ^ {\mathrm {s c}} = \operatorname {e n c o d e r} \left(t _ {\langle \mathrm {s c} \rangle}, S, A ^ {o}, T\right), \tag {17} +$$ + +$$ +H ^ {\mathrm {s r g}} = \operatorname {e n c o d e r} \left(t _ {\left(\mathrm {s r g}\right)}, S, A ^ {o}, T\right), \tag {18} +$$ + +$$ +H ^ {\text {i r g}} = \operatorname {e n c o d e r} \left(t _ {\langle \text {i r g} \rangle}, S, A ^ {o}, T\right). \tag {19} +$$ + +We employ fine-grained, object-level emotion-laden descriptions to establish alignment between specific targets and their corresponding objects in the image, which enhances both the accuracy and interpretability of the sentiment prediction process. Subsequently, these hidden features are passed through a stack of self-attention-based encoders, which dynamically fuse representations and model both intra-modal and cross-modal interactions. Finally, the decoder produces task-specific outputs. For Sentiment Classification (SC), the decoder generates the predicted sentiment polarity, selecting from "positive," "negative," or "neutral," denoted as $\hat{y}_{sc}$ : + +$$ +G ^ {\mathrm {s c}} = \left[ \langle \mathrm {s e n} \rangle \hat {y} ^ {\mathrm {s c}} \langle / \mathrm {s e n} \rangle \right], \tag {20} +$$ + +where the special tokens $\langle \mathrm{sen}\rangle$ and $\langle / \mathrm{sen}\rangle$ are denoted as the start and end markers for SC predictors. 
For the two additional rationale generation tasks SRG and IRG, the decoder generates not only the semantic rationale $\hat{s}r$ and impression rationale $\hat{i}r$ for the specific target but also their corresponding sentiment predictions $\hat{y}_{sr}$ and $\hat{y}_{si}$ : + +$$ +G ^ {\mathrm {s r}} = \left[ \langle \mathrm {s r} \rangle \hat {s} r \langle / \mathrm {s r} \rangle \langle \mathrm {s e n} \rangle \hat {y} ^ {\mathrm {s r}} \langle / \mathrm {s e n} \rangle \right], \tag {21} +$$ + +$$ +G ^ {\mathrm {i r}} = \left[ \langle \mathrm {i r} \rangle \hat {i r} \langle / \mathrm {i r} \rangle \langle \mathrm {s e n} \rangle \hat {y} ^ {\mathrm {i r}} \langle / \mathrm {s e n} \rangle \right], \tag {22} +$$ + +where $\langle \mathrm{sr}\rangle$ , $\langle / \mathrm{sr}\rangle$ , $\langle \mathrm{ir}\rangle$ , $\langle / \mathrm{ir}\rangle$ , $\langle \mathrm{sen}\rangle$ , and $\langle / \mathrm{sen}\rangle$ serve as specialized markers to delineate the rationale and sentiment polarity. Finally, the input sequence is uniformly denoted + +as $X$ , and the generated textual content is represented as $Z = \{z_{1}, z_{2}, \ldots, z_{N_{z}}\}$ . Consequently, the loss function for the generation process is formulated as follows: + +$$ +\mathcal {L} _ {Z} = - \frac {1}{N} \sum_ {i = 1} ^ {N} \sum_ {n _ {z} = 1} ^ {N _ {z}} \log P \left(z _ {i, n _ {z}} \mid \hat {z} _ {i, < n _ {z}}, X\right), \tag {23} +$$ + +where $z_{i,n_z}$ is the ground truth token at position $n_z$ for sample $i$ , $\hat{z}_{i, < n_z}$ represents the generated sequence up to position $n_z - 1$ for sample $i$ , and $P(z_{i,n_z} \mid \hat{z}_{i, < n_z}, X)$ denotes the probability of generating token $z_{i,n_z}$ conditioned on $\hat{z}_{i, < n_z}$ and $X$ . In this rationale-aware learning framework, since all objectives are formulated as generative tasks, the loss functions $\mathcal{L}_{SC}$ , $\mathcal{L}_{SRG}$ , and $\mathcal{L}_{IRG}$ are all employ the generative loss function, E.q. 
23. Therefore, the objective function in the proposed method is formulated as follows: + +$$ +\mathcal {L} = \alpha \mathcal {L} _ {\mathrm {S C}} + \frac {1 - \alpha}{2} \mathcal {L} _ {\mathrm {S R G}} + \frac {1 - \alpha}{2} \mathcal {L} _ {\mathrm {I R G}} + \lambda \mathcal {L} _ {\text {a l i g n}}, \tag {24} +$$ + +where $\alpha, \lambda \in (0,1)$ are tradeoff hyperparameters that regulate the relative contributions of each generative loss and the patch-token alignment. + +# 4 EXPERIMENTS + +In this section, we provide a comprehensive description of the experimental settings and evaluate the proposed method on three publicly available MASC datasets, benchmarking it against state-of-the-art methods. Furthermore, we perform an extensive series of studies to thoroughly analyze the effectiveness of the proposed approach. + +# 4.1 Experimental Settings + +# 4.1.1 Datasets + +We utilize three widely recognized benchmark datasets for MASC [11], [81]: Twitter-2015, Twitter-2017, and the Political Twitter dataset. Each sample within these datasets comprises a user-generated multimodal image-text pair, including an image, a textual sentence, and one or more specific targets. Each aspect is annotated with a sentiment label from the set Positive, Negative, Neutral. The detailed statistics of these datasets are presented in Table 3. Furthermore, we incorporate semantic rationale (SR), impression rationale (IR), aesthetic captions for the entire image (AC), facial descriptions (FD), and aesthetic captions for objects (AO) for each data point. The maximum length for facial descriptions and aesthetic captions is constrained to 50 tokens. + +# 4.1.2 Implementation Details + +We adopt the seq2seq model Flan-T5 [82] as the backbone of our generative framework. Specifically, the model is trained for 10 epochs using the AdamW optimizer [83], with a batch size of 4. 
A grid search is performed on the development set to determine the optimal learning rate, $\alpha$ and $\lambda$ for Flan-T5 across the three datasets. The selected values for learning rate are $3e - 4$ , $3e - 4$ , $1e - 4$ , respectively, for the Twitter-2015, Twitter-2017 and Political Twitter. The trade-off hyperparameter sets $(\alpha$ and $\lambda)$ are 0.2, 0.1, 0.2 and 0.2, 0.5, 0.5, respectively, for the Twitter-2015, Twitter-2017 and Political Twitter. Consistent with prior research on MASC [11], [24], + +we employ Accuracy (Acc) and F1 score (F1) as the evaluation metrics. The model is implemented using PyTorch, and experiments are conducted on an NVIDIA V100 GPU with 30 GB of memory. + +# 4.2 Compared Baselines + +We conducted a comprehensive comparative evaluation of the proposed method against a range of robust baseline approaches, which are classified into three categories. The first category consists of image-only methods: + +- Res-Target [84] leverages ResNet as its backbone to extract visual features exclusively for predicting the sentiment of the specified target. + +The second category includes text-only approaches: + +- MemNet [85] employs a stacked architecture of multiple memory networks to build deep memory networks. +- MGAN [86] is based on a multi-grained attention architecture designed to adaptively capture both coarse-grained and fine-grained interactions. +- BERT [87] is a powerful pre-trained language model trained using a masked language modeling objective and next sentence prediction. + +Finally, this study incorporates the following advanced image-text multimodal approaches: + +- MIMN [88] comprises two customized interactive memory networks designed to capture both inter-modal dynamics between different modalities and intra-modal dynamics within each individual modality. 
+- ESAFN [12] is a target-sensitive interaction and fusion network designed to adaptively capture interactive features across modalities while also modeling intra-modality features. +- TomBERT [11] utilizes BERT and ResNet as backbone models for encoding textual and visual content, respectively. Cross-modal fusion is accomplished by integrating these features into a BERT encoder. +- JML-MASC [44] jointly extracts the specific targets and identifies their sentiment polarity by utilizing a visual de-nosing mechanism and attention-based fusion framework. +- EF-CapTrBERT [17] converts visual content into an auxiliary sentence, which is then combined with the input sentence and processed through a BERT encoder for sentiment prediction. +- VLP-MABSA [14] is a task-specific pre-trained generative framework for multimodal aspect-based sentiment analysis, built on the BART architecture. +- FITE [23] is a translation-based approach, which captures facial features in the image and translates them into a corresponding facial description as an auxiliary sentence for sentiment classification. +- CMMT-MASC [15] is a cross-modal multi-task Transformer designed for MASC. Additionally, it employs multimodal gating mechanisms to dynamically regulate the flow of textual and visual information during interactions. + +TABLE 3 Detailed Statistics of Twitter-2015, Twitter-2017, and Political Twitter datasets. The "#sentence" refers to the total number of sentences. "#Avg. Length" denotes the average length of sentences, while "#Avg. Aspect" indicates the average number of aspects in a sentence. "#Avg. Length of SR", "#Avg. Length of IR", "#Avg. Length of AC", "#Avg. Length of FD", and "#Avg. Length of AO" correspond to the average lengths of semantic rationales (SR), impression rationales (IR), aesthetic captions for the entire image, facial descriptions, and aesthetic captions for objects. + +
LabelTwitter-2015Twitter-2017Political Twitter
TrainDevTestTrainDevTestTrainDevTest
Positive92830331715085154933318570176
Neutral188367060716385175734697823368
Negative368149113416144168887166305
Total31791122103735621176123489021559849
#Sentence210172767417465775875105900407
#Avg. Length16.7216.7417.0516.2116.3716.3816.6216.6716.59
#Avg. Aspect1.511.541.542.042.042.101.741.732.09
#Avg. Length of SR42.542.442.542.642.843.042.742.642.2
#Avg. Length of IR56.756.055.755.556.155.455.956.156.3
#Avg. Length of AC35.935.935.532.532.531.634.034.233.3
#Avg. Length of FD39.238.537.838.938.539.339.038.438.7
#Avg. Length of AO29.129.730.328.929.428.929.129.131.3
+ +- HIMT [89] is a Transformer framework that incorporates a hierarchical interaction component to model the relationships between specific aspects and the input sentence, as well as the interactions between specific aspects and object-level visual content. +- IMT [13] is a coarse-to-fine-grained multimodal matching network that predicts image-target relevance and performs object-target alignment to support sentiment polarity identification. +- CoolNet [19] is a fine-grained cross-modal alignment approach that aligns textual and visual content from both semantic and syntactic perspectives. +- UnifiedTMSC [90] introduces a descriptive prompt paraphrasing paradigm to generate paraphrased prompts, while optimizing image vectors within the multimodal representation space of vision and language. +- VEMP [91] decodes the semantic information of visual elements by utilizing textual tokens in the image, target-aware adjective-noun pairs, and image captions. +- Atlantis-MASC [22] is a trident-shaped, aesthetic-driven approach for joint MABSA, which integrates image aesthetic attributes and achieves effective alignment of vision and text across multiple granular levels. +- MDCA [24] is a generative framework proposed to provide supplementary reasoning and explicit rationales to explain why specific content conveys certain sentiment. + +# 4.3 Main Results + +The main results are presented in Table 4. Given that the two additional rationale generation tasks contribute to improving sentiment prediction by providing explanations for the underlying causes of sentiment, we select the prediction + +results from sentiment classification $\hat{y}^{\mathrm{sc}}$ as the primary outcomes for accuracy and F1 score evaluation. As presented in Table 4, the proposed method demonstrates competitive performance on both Twitter datasets compared to strong baselines from both text-only and multimodal approaches. 
Specifically, it achieves the highest accuracy (81.61%) and F1 score (77.98%) on the Twitter-2015 dataset, as well as the best accuracy (75.62%) and a near-optimal F1 score (74.59%) on the Twitter-2017 dataset. + +Compared to the image-only approach (Res-Target), the proposed method achieves a remarkable improvement of over $21.73\%$ in accuracy on the Twitter-2015 dataset. Similarly, when compared to the best-performing text-only method (BERT), the proposed method demonstrates a substantial performance gain, with a $7.46\%$ increase in accuracy and a $9.12\%$ improvement in F1 on Twitter-2015. These observations underscore the limitations of single-modality approaches in capturing subtle sentiment cues from multimodal content. Moreover, the proposed method consistently outperforms recent multimodal models, such as UnifiedTMSC, Atlantis-MASC, and MDCA. For instance, UnifiedTMSC adopts a paraphrasing-based approach to enrich textual features but lacks explicit modeling of visual aesthetic-driven affective impact. On Twitter-2017, the proposed method achieves comparable F1 performance (74.59 vs. 74.70) while delivering higher accuracy (75.62 vs. 75.40), which highlights the complementary benefits of aesthetic affective resonance modeling. While Atlantis-MASC incorporates image aesthetics, it primarily relies on global alignment techniques, which may overlook the intricate relationships between aspects and objects. The proposed method surpasses Atlantis-MASC by $1.58\%$ in accuracy on Twitter-2017, underscoring the efficacy of its patch-token level and object-level alignment in capturing aspect-specific visual details. While MDCA incorporates reasoning and direct causality to explain sentiment causes, it primarily emphasizes textual semantic reasoning, which restricts its + +TABLE 4 The main results $(\%)$ are presented with the best-performing results highlighted in bold and the second-best values indicated with underlined text. + +
ModalityModelVenueTwitter-2015Twitter-2017Political Twitter
AccF1AccF1AccF1
Image OnlyRes-TargetCVPR 201659.8846.4858.5953.9860.2158.42
Text OnlyMemNetEMNLP 201670.1161.7664.1860.90--
MGANEMNLP 201871.1764.2164.7561.4667.3762.78
BERTNAACL 201974.1568.8668.1565.2369.4164.25
Image and TextMIMNAAAI 201971.8465.6965.8862.9970.5265.39
ESAFNTASLP 201973.3867.3767.8364.2269.2264.66
TomBERTIJCAI 201977.1571.1570.3468.0369.6562.35
JML-MASCEMNLP 202178.70-72.70-70.1468.37
EF-CapTrBERTACM MM 202178.0173.2569.7768.4269.0464.94
VLP-MABSAACL 202278.6073.8073.8071.8070.3269.64
CMMT-MASCIPM 202277.90-73.8---
FITEEMNLP 202278.4973.9070.9068.7068.6465.83
HIMTTAC 202278.1473.6871.1469.16--
IMTIJCAI 202278.2774.1972.6171.9769.9267.86
CoolNetIPM 202379.9275.2871.6469.5870.9170.25
UnifiedTMSCEMNLP 202379.8076.3075.4074.70--
VEMPEMNLP 202378.8875.0973.0172.42--
Atlantis-MASCINFFUS 202479.03-74.20-69.8368.97
MDCATNNLS 202480.7177.1573.9172.3771.3870.94
OursChimera-81.6177.9875.6274.5972.5672.32
+ +ability to effectively capture detailed visual content and the corresponding aesthetic affective resonance. In contrast, the proposed method surpasses MDCA with a $0.90\%$ improvement in accuracy and a $0.83\%$ increase in F1 on the Twitter-2015 dataset. This performance gain highlights the advantages of comprehensively understanding sentiment causality from both visual-textual semantic and affective resonance perspectives. + +# 4.4 Results on Political Twitter + +The Political Twitter dataset differs significantly from Twitter-2015 and Twitter-2017, especially due to its challenging domain shift between training, development, and test sets. Such domain differences create substantial barriers to generalization, which makes the task particularly suitable for advanced models that can comprehend subtle causality and context shifts. + +From Table 4, it can be observed that the proposed Chimera demonstrates distinct advantages over existing approaches on the Political Twitter dataset. Compared to the third best performing method CoolNet, which achieved $71.32\%$ accuracy and $69.64\%$ F1 score, Chimera showcases a significant improvement. Similarly, MDCA, which performed with an accuracy of $71.38\%$ and an F1 score of $70.94\%$ , still lags behind Chimera. Additionally, we observed that the discrepancy between accuracy and F1-score significantly narrows as accuracy increases, particularly when accuracy surpasses $70\%$ . We hypothesize that the underlying cause may lie in the relatively balanced class distribution of sentiment categories (e.g., positive, neutral, + +negative) within the Political Twitter test set (as shown in Table 3). At higher accuracy levels, the ratios of false positives to false negatives exhibit increasing symmetry across models. This equilibrium consequently reduces the divergence between precision and recall metrics, thereby causing the F1-score - defined as their harmonic mean - to naturally converge with accuracy. 
+ +# 4.5 Ablation Study + +To systematically investigate the influence of the linguistic-aware semantic alignment module, including semantic and impression rationale reasoning as well as object-level fine-grained alignment, on sentiment prediction, we conducted a series of ablation studies and the results are shown in Table 5. + +As presented in Table 5, the exclusion of semantic rationale ("w/o SRG") results in a noticeable performance decline across all three datasets. This effect is particularly pronounced on the Twitter-2017 and Political Twitter datasets, where nearly all evaluation metrics, including accuracy and F1 score, exhibit a reduction of approximately $2\%$ . Similarly, the absence of impression rationale reasoning ("w/o IRG") results in performance fluctuations on the Twitter-2015 and Political Twitter datasets. However, the most noticeable effect is observed on the Twitter-2017 dataset, where the model's performance exhibits a significant degradation, particularly in the sentiment classification task, with nearly a $4\%$ drop in both accuracy and F1 score. The results ("w/o IRG & AC") reveal consistent performance degradation in both Accuracy and F1-score across all three datasets. Particularly noteworthy + +TABLE 5 The results $(\%)$ of the ablation study for our Chimera model are presented. The top-performing values emphasized in bold and the second-best values distinguished using underlined text. The notations "w/o SRG," "w/o IRG," and "w/o SRG & IRG" denote the exclusion of the respective generative tasks. "w/o IRG & AC" refers to the removal of IR generation task and replace the aesthetic caption (AC) with general caption. "w/o LSA" represents the removal of the Linguistic-aware Semantic Alignment branch, while "w/o OD" indicates the exclusion of object-level descriptions (e.g., facial descriptions and object-level aesthetic captions) from the input sequence. + +
MethodTwitter-2015Twitter-2017Political Twitter
AccF1AccF1AccF1AccF1AccF1AccF1AccF1AccF1AccF1
SCSRGIRGSCSRGIRGSCSRGIRG
Chimera81.6177.9881.1277.1177.5673.5575.6274.5975.0973.6471.9668.2372.5672.3271.6971.4069.3068.95
w/o SRG80.5276.10--75.8370.9673.5072.49--70.6667.2070.4369.88--68.2567.58
w/o IRG80.2375.2280.0375.42--71.8870.1672.670.73--71.1570.7071.0170.52--
w/o IRG & AC80.6776.0380.1176.46--71.5969.8372.2570.33--70.6270.0671.0470.47--
w/o SRG & IRG77.2471.82----71.2368.98----67.8867.20----
w/o LSA80.5477.0379.7576.2276.5272.0373.7270.9674.3872.2671.3667.8871.8671.3770.9270.5568.4367.99
w/o OD79.9676.0880.0976.3277.1272.8473.0670.8574.3772.3671.1167.5371.6471.1271.1270.7768.5568.07
w/o Aes-cap80.0375.2779.9476.0575.6971.0872.3671.6472.2871.2169.2865.4469.4368.9469.3769.0067.8567.27
+ +![](images/4da63db8ee69d0dc75759d822365f0103e1750ba58561bdf4257661e7d41d2c8.jpg) + +![](images/08133c089ed25791aec10225277c395d31439bd5d26d91d73a8056da7ca18d6f.jpg) + +![](images/ef430b2b0457b39e9f33224cf48720981aa5772b0bdcca9226f222196ae0fd43.jpg) + +![](images/d9c00fe46e11a776cce92beb84c5862921e6256a790e3398fc8e625562168895.jpg) +Fig. 2. Results $(\%)$ on hyper-parameter of $\alpha$ and $\lambda$ . + +![](images/be9a387bddeba4b58e166a6984c8c5b0867e9a4c5e45612fe0aa5451666328af.jpg) + +![](images/050be5835958eb54c08c32fca9f8350415aab027b5d014443ebe81b11f308c55.jpg) + +is the model's inferior performance on Twitter-2017 and Political Twitter datasets compared to the baseline(w/o IRG). However, an unexpected performance improvement emerges in Twitter-2015, surpassing even the configuration retaining aesthetic captions as input. This phenomenon may be attributed to dataset-specific characteristics in sample distribution. As detailed in Table 3, Twitter-2015 exhibits a significantly higher proportion of neutral-class samples compared to Twitter-2017 and Political Twitter. When the Chimera model is deprived of its reasoning abilities for both semantic and impression rationales ("w/o SRG & IRG"), its performance on sentiment classification declines to the lowest levels across all datasets. Specifically, a consistent reduction of approximately $4 - 5\%$ is observed in nearly all metrics, underscoring the essential role of rationale-based reasoning in enhancing the effectiveness and accuracy of sentiment analysis tasks. These results show that the influence of rationale reasoning differs across datasets. For Twitter-2017, with its balanced sentiment distribution (see Table 3), impression rationale has a greater impact on sentiment analysis. In contrast, both semantic and impression rationales contribute to the other two datasets, but neither is dominant. 
+ +The LSA branch plays a pivotal role in the Chimera model by bridging the semantic gap between textual and visual modalities, ensuring effective alignment of information across visual and textual data. Its removal (w/o LSA) consistently leads to a significant decline in performance across all datasets, as evident in the ablation study. For instance, on Twitter-2015, the accuracy drops from $81.61\%$ to $80.54\%$ , and the F1 score decreases from $77.98\%$ to $77.03\%$ . Similarly, for Twitter-2017, accuracy, and F1 score dropped to $73.72\%$ and $70.96\%$ , respectively. By aligning linguistic and visual features, the branch allows the model to effectively interpret semantic overlaps and contrasts, enabling more accurate sentiment predictions. + +Object-level descriptions (e.g., facial expressions and object-level aesthetic captions) enrich the input sequence by providing object-level detailed visual context. The ablation study reveals that removing OD (w/o OD) causes noticeable performance drops. On Twitter-2015, accuracy drops by 1.65 percentage points, and the F1 score decreases by 1.90 percentage points. Similarly, on Twitter-2017, accuracy is reduced by 2.56 percentage points, while the F1 score drops by 3.74 percentage points. Without the OD, the model + +loses access to these fine-grained visual features, leading to diminished interpretability and accuracy, particularly in datasets where visual information plays a crucial role in determining sentiment. Additionally, the aesthetic caption is excluded from the input sequence to assess its impact on performance (w/o Aes-cap). As demonstrated in Table 5, the absence of aesthetic features results in a noteworthy decline in performance across all datasets, particularly in the impression rationale generation (IRG) task. 
This leads to Chimera exhibiting the poorest sentiment classification performance for IRG on the Twitter-2017 and Political Twitter datasets, which underscores the importance of aesthetic captions in guiding the model to generate coherent and emotionally nuanced impressions. + +# 4.6 Hyper-parameter Analysis + +We conduct a hyperparameter analysis to explore the impact of $\alpha$ and $\lambda$ on the Chimera model's performance across the Twitter-2015, Twitter-2017, and Political Twitter datasets. Hyperparameter $\alpha$ regulates the balance between sentiment classification (SC) and rationale generation components (semantic and impression rationales, SRG, and IRG), while $\lambda$ controls the weight of patch-token alignment within the overall loss function. As shown in Figure 2, for all datasets, a lower $\alpha$ , which assigns greater weight to rationale generation, generally improves model performance, with values around 0.1 to 0.2 achieving the highest accuracy and F1 scores. This emphasizes the significance of integrating semantic and impression rationales in MASC. As $\alpha$ increases, favoring SC loss, performance plateaus or declines, particularly for the Political Twitter dataset, indicating that reduced emphasis on rationale generation diminishes the model's ability to capture fine-grained sentiment context effectively. Moreover, the results indicate that increasing $\lambda$ initially enhances model performance, with diminishing returns beyond a certain threshold. For the Twitter-2015 and Political Twitter datasets, moderate $\lambda$ values [0.2, 0.5] achieve optimal accuracy and F1 scores, while higher values ( $\lambda > 0.6$ ) lead to performance stabilization or slight decline. 
This observation indicates that balanced alignment between visual and textual features enhances the model's interpretability and accuracy, while excessively high $\lambda$ values may negatively impact performance, likely due to overemphasis on alignment at the expense of core sentiment classification. For Twitter-2017, a similar trend is observed, although performance variations are less pronounced. + +# 5 IN-DEPTH ANALYSIS + +# 5.1 Quality Analysis of Rationale + +Table 6 provides an evaluation of the sentiment rationale quality for both the ground-truth and Chimera-generated content, aiming to analyze their impact on sentiment analysis. A pre-trained sentiment classification model [92] is employed to assess the intuitive sentiment quality of these rationales across three test datasets by inputting the rationales into the model and analyzing the sentiment predictions. For both SR and IR, the results in the GroundTruth row represent the upper performance bound. It is evident that the ground truth performance for SR significantly exceeds that of IR, indicating that semantic rationales + +TABLE 6 The evaluation results $(\%)$ of rationale quality. The best-performing results are highlighted in bold. + +<table><tr><td>
Rationale SourceTwitter-2015Twitter-2017Political
AccF1AccF1AccF1
Semantic Rationale
Ground-Truth99.0499.0498.5498.5497.6497.64
Chimera80.9180.8375.0474.9370.2070.14
Impression Rationale
Ground-Truth69.9169.9072.7772.7176.876.87
Chimera63.4563.6561.6759.3860.5460.12
+ +![](images/f035b5be188bc560f8e6ccdde0472213968c026b15af35b35081f552ab580ea2.jpg) +Fig. 3. Human evaluation of factuality, clarity and fluency for SR and IR. + +are more critical for this task than impression rationales. We hypothesize that two factors contribute to this discrepancy. Firstly, as illustrated in Table 3, semantic rationales are shorter in length and straightforward, facilitating easy comprehension, while the emotions elicited by images are inherently more abstract and multifaceted. Secondly, the IR's reliance on visual cues contrasts sharply with the Twitter dataset's text-centric sentiment distribution. Prior research has shown that a considerable majority of targets (around $58\%$ ) are absent from images [13], and most targets (93% in Twitter-2015) exhibit emotional coherence with their textual counterparts [93]. This misalignment underscores the dataset's limitations in evaluating IRs and necessitates a nuanced understanding of the interplay between visual and textual sentiment representations. + +A total of 180 samples were randomly selected for human evaluation, with 100 samples drawn from the training set, 40 from the testing set, and 40 from the validation set of both the Twitter-2015 and Twitter-2017 datasets. Four native English speakers with Master's degrees in the arts were recruited to assess the quality of the rationale data based on three criteria: (1) factuality, evaluating whether the rationale is grounded in accurate and verifiable information; (2) clarity, assessing the logical structure and comprehensibility of the rationale; and (3) fluency, measuring the grammatical accuracy and smoothness of the language used. The Fleiss' Kappa $(\kappa)$ values for the initial evaluation across the four raters were as follows: factuality $\kappa = 0.922$ , clarity $\kappa = 0.945$ , and fluency $\kappa = 0.960$ . In cases of disagreement, the evaluators engaged in discussions to reach a consensus. 
+ +Figure 3 presents the results of the human evaluation. It can be observed that SR consistently exhibits higher quality + +![](images/d0fccd6c9c3806a0763a2979525d5a81dbcd338f8180464516628387f00a7ac6.jpg) +Fig. 4. Assessment of sentiment intensity for SR and IR in both ground truth data and Chimera-generated content. + +across all metrics, which verifies that the employed LLM is capable of generating appropriate rationale data for specific tasks when provided with concrete ground-truth labels. In comparison to SR, IR demands a more in-depth understanding of visual content and is inherently more subjective. Consequently, IR is more prone to issues of factuality and clarity, as interpreting the abstract aesthetic and emotional elements conveyed by an image often involves subjective reasoning, which may lead to misalignment with objective ground truths or human expectations. + +# 5.2 Quantitative Analysis of Rationale + +We conduct a quantitative analysis on the test sets of ground truth and Chimera-generated content to examine the impact of varying levels of sentiment intensity in cognitive rationales on the accuracy of sentiment prediction, including their potential to amplify or diminish predictive performance. As illustrated in Figure 4, the sentiment intensity distributions of Twitter-2015 and Twitter-2017 reveal distinct patterns. Specifically, the sentiment intensity of IR demonstrates a noticeable bias toward positive values, whereas the sentiment intensity of SR aligns more closely with the sentiment polarity label distribution presented in Table 3. + +This observation suggests that IR demonstrates a bias toward positive samples, increasing the model's confidence in predicting positive instances. While this bias may be beneficial for datasets with a higher proportion of positive samples (e.g., Twitter-2017), it could lead to additional bias in datasets with a limited representation of positive samples. 
This finding is further corroborated by the ablation study results, which reveal that the performance of the Chimera model without IR is worse on Twitter-2017 compared to its performance on Twitter-2015. Another notable observation is that, for the ground truth of the Political Twitter dataset, the sentiment intensity distribution of IR is relatively uniform across all ranges. In contrast, the Chimera-generated content for IR exhibits a more distinguishable sentiment intensity distribution compared to the ground truth, which further validates the quality of SR, the effectiveness of the proposed Chimera training paradigm, and the robustness of Chimera's performance. + +![](images/3acadf58ed5ee68a3128615d89f084eeca07b2f9bf82d1cd486f6dc56fa12528.jpg) +Fig. 5. Visualization of the top 15 most frequent aesthetic-related words in generated IR. + +# 5.3 Impact of Aesthetic Attributes on Sentiment + +To investigate the impact of image aesthetic attributes on sentiment analysis, we visualize the frequency of aesthetic-related words within the impression rationales generated by our proposed Chimera model and its variant "Chimera w/o Aes-cap" on the Twitter-2015 and Twitter-2017 test sets. Specifically, we visualize the top 15 most frequent aesthetic-related words within the generated IR, based on the aesthetic attributes defined by Milena et al. [94]. As shown in Figure 5, the frequency analysis of aesthetic-related words for Chimera on Twitter-2015 and Twitter-2017 reveals that "visual," "vibrant," "focus," and "design" prominently appear across both datasets. These terms, associated with visual clarity, expressive quality, image composition, and cohesiveness, align with the model's improved accuracy and F1 scores. However, excluding the aesthetic caption from the input results in subtle shifts in the frequency distribution of these aesthetic-related terms. 
For Twitter-2015, the overall frequency distribution of aesthetic-related terms shows minimal change, with a slight increase in "focus" and a decrease in "vibrant". In contrast, for Twitter-2017, "focus" shows a significant increase, while "visual" and "vibrant" decrease notably. Combined with the ablation study results in Table 5, the absence of aesthetic captions in the input + +
Input image-text pair & Auxiliary sentencesMethodSemantic Rationale (SR of GT, Chimera and ChatGPT-4o) Reasoning Cause(RC of MDCA)Impression Rationale (IR of GT and Chimera)/ Direct Cause(DC of MDCA)Sentiment
RT @ MamiHerald : [Joanne Stiger]POS and Melissa Keller are the first same - sex couple to be married in Broward :Ground Truth (GT)Based on the image-text pair, the sentiment towards Joanne Stiger in the given text and image is positive, because the text highlights a historic and celebratory event of her marriage, and the image shows people smiling and appearing joyful, indicating happiness and approval.The image emanates a joyous and celebratory atmosphere, highlighted by the smiling faces and relaxed postures of the individuals present. The presence of photographers and the gathering of people around the couple enhance the sense of an important and positive milestone being reached. Overall, the visual conveys happiness, love, and a significant moment being warmly embraced by all involved.Positive
ChimeraThe sentiment towards Joanne Stiger in the given text and image is positive, because the text highlights her as the first same-sex couple to be married in Broward, and the image shows her smiling and looking happy, which conveys a sense of pride and joy.The image depicts two individuals who appear relaxed and happy, with one person appearing content and relaxed. The overall impression is one of happiness and contentment. The setting and body language suggest a sense of happiness, together with a pleasant and celebratory atmosphere.Positive
Aesthetic caption: The body language of the two people shows that they are in a very different way. The background shows a room with a lot of people and the cameraman is in the background.MDCABecause the tweet simply reports on a factual event without expressing any emotional response towards Joanne Stiger. The use of the word "first" suggests that the author is excited about the news and is highlighting the fact that Stiger and Keller are the first same-sex couple to be married in Broward. Overall, the tweet does not provide enough information to determine a positive or negative sentiment towards JoAnn Stiger herself.Because joanne stiger and Melissa keller are the first same - sex couple to be married in bronward.Neutral
GPT-4oBased on the image-text pair, the sentiment towards Joanne Stiger in the given text and image is Positive, because both the faces of Joanne Stiger and Melissa Keller, as well as the individuals around them, display expressions of happiness and joy, indicating a celebratory and momentous occasion.Positive
Primary Football Cup . [St Bede]' s v St Augustines friendly . . . both out but now for the friendly # sportsmanshipGround TruthBased on the image-text pair, the sentiment towards St Bede in the given text and image is neutral, because the text describes the event as a friendly match with a focus on sportsmanship, and the image shows a regular football game without any emotional or evaluative expressions towards St Bede.The image depicts a friendly football match between St Bede's and St Augustine's on a lush green field under a partly cloudy sky. The overall scene conveys a sense of camaraderie and sportsmanship, aligning well with the neutral sentiment label. The focus appears to be on enjoying the game and fostering friendly competition in a relaxed atmosphere.Neutral
ChimeraThe sentiment towards St Bede in the given text and image is neutral, because the text simply states that St Bede is participating in a friendly match without expressing any positive or negative emotions, and the image shows a standard football match scene without any clear emotional cues.The image of the St Bede vs. St Augustines football match conveys a sense of camaraderie and sportsmanship. The players are engaged in a moment of play, suggesting a positive and supportive atmosphere. The neutral sentiment label aligns with the overall feeling of cambered athleticism and teamwork, without evoking strong emotions.Neutral
Aesthetic caption: The position of the camera captures the players in action, as they are engaged in a game of soccer. the position of the camera captures the players in action, and the position of the camera captures the players in action.MDCABecause the writer is promoting the team's friendly football match with St. Albers, which suggests that they are a fan of the team. The use of the hashtag #sportsmanship also suggests that the writer has a positive opinion of St. Bede. Overall, the language and tone of the tweet suggest that the sentiment towards St. Bede is positive.It is a friendly football cup.Positive
GPT-4oBased on the image-text pair, the sentiment towards St Bede in the given text and image is Positive, because the text highlights a friendly and sportsmanlike attitude despite both teams being out of the competition, and the image shows players engaging in the match, fostering a positive environment.Positive
[Michael Oer]NEG is latest reminder that NFL pain lasts 24 / 7 / 365Ground TruthBased on the image-text pair, the sentiment towards Michael Oer in the given text and image is negative, because the text highlights the constant and enduring pain associated with his NFL career, implying a prolonged and difficult experience.The image of Michael Oer in his NFL uniform, combined with the sentiment label "negative", conveys a sense of enduring struggle and fatigue. His posture and facial expression suggest a feeling of exhaustion or frustration, which aligns with the text's implication that the pain and challenges of an NFL career are relentless and unforgiving. The overall impression is one of continuous physical and emotional burden.Negative
ChimeraThe sentiment towards Michael Oer in the given text and image is negative, because the text highlights that his recent reminder that NFL pain lasts 24/7/365, which is a serious and distressing reminder.The image of Michael Oer exudes a sense of determination and focus, aligning with the negative sentiment label. His serious expression and the context of NFL pain underscore a feeling of exhaustion and exhaustion, which complements the idea of prolonged pain and the accompanying reminder that "no matter how much you love it," evoking a negative feeling.Negative
MDCABecause the tweet does not express any clear positive or negative opinion towards him. The tweet simply states that he is a reminder that NFL pain lasts for a specific time period. There is no indication of any emotional attachment or opinion towards Michael Oer in the tweet, and the language used is informative rather than emotional. Therefore, the sentiment towards Michael Oer in this tweet is neutral.He is reminder that NFL pain lasts 24 / 7 / 365.Neutral
GPT-4oBased on the image-text pair, the sentiment towards Michael Oer in the given text and image is Negative, because the text highlights the unending pain associated with NFL careers, using Michael Oer as an example, and alludes to the physical and possibly emotional toll he endures.Negative
+ +Fig. 6. Three examples showcasing the predictions generated by Chimera, MDCA, and GPT-4o are presented for analysis. During the evaluation process, GPT-4o exclusively produces the semantic rationale (SR). The input image-text pair and auxiliary sentences are utilized solely by Chimera. For MDCA, the reasoning cause (RC), direct cause (DC), and sentiment prediction are derived through direct inference. + +leads to the worst sentiment analysis performance across all datasets on IRG. This highlights the critical role of aesthetic captions in enhancing the model's understanding of image aesthetics, particularly in datasets like Twitter-2017 with balanced sentiment distributions. Specifically, attributes such as "visual" and "vibrant" positively contribute to sentiment analysis performance, whereas "focus" appears to significantly impair it. We speculate that since "focus" emphasizes specific image elements, potentially leads to an unbalanced interpretation of visual content. This localized emphasis can narrow the model's analytical scope, prioritizing details at + +the expense of broader context and compositional harmony. Consequently, the model may struggle to capture holistic aesthetic and emotional cues essential for accurate sentiment classification. + +# 5.4 Comparison with Large Language Models + +We evaluate the performance of GPT-4o on the MASC task under a zero-shot setting. As shown in Table 7, GPT-4o achieves an accuracy of $46.87\%$ and an F1 score of $47.47\%$ , which is substantially lower than Chimera, which reports $81.61\%$ accuracy and $77.98\%$ F1 score. On the Twitter-2017 + +TABLE 7 The experimental results $(\%)$ of GPT-4o on the MASC task under a zero-shot setting are presented. The best-performing results highlighted in bold. The term "dis" refers to the percentage of samples where the sentiment polarity associated with a specific aspect cannot be discerned. + +
MethodTwitter-2015Twitter-2017
AccF1DisAccF1Dis
Chimera81.6177.98-75.6274.59-
GPT-4o46.8747.470.256.0853.280.5
GPT-4o w/o image67.0262.38-59.6460.35-
+ +dataset, GPT-4o shows an improvement with an accuracy of $56.08\%$ and an F1 score of $53.28\%$ . However, this performance still trails behind Chimera, which reports $75.62\%$ accuracy and $74.59\%$ F1 score. Surprisingly, removing the image input results in an improvement in the model's accuracy and F1 score, reaching $67.02\%$ and $62.38\%$ on the Twitter-2015 dataset, respectively. This observation contrasts sharply with the phenomenon observed in the baseline model. Similarly, in the Twitter-2017 dataset, the performance of GPT-4o without image input is slightly better than with the image input. We speculate that in task-specific models, incorporating image data typically improves sentiment classification performance, as these models are finetuned to leverage multi-modal inputs effectively. However, in a zero-shot setting, GPT-4o operates based on its general pre-trained knowledge, which may not be fully optimized for combining textual and visual inputs for sentiment analysis. In this setting, adding image input may introduce noise rather than meaningful information. Moreover, GPT-4o has a low Dis value on both datasets, which slightly decreases to 0 when the image input is removed. This further suggests that the model's ability to distinguish sentiment polarity is, to a certain extent, influenced by the inclusion of the visual modality. + +# 5.5 Case Study + +An additional case study is performed to provide a more comprehensive evaluation of the effectiveness of the proposed Chimera model. Figure 6 illustrates three representative examples, each corresponding to positive, neutral, and negative samples, respectively. As illustrated in the first example, MDCA is the sole model to predict "Neutral" for the target "Joanne Stiger," whereas the other three models accurately predict "Positive". This result is primarily due to the RC and DC generated by MDCA, which lack the expression of positive or negative sentiment. 
Notably, the RC predominantly emphasizes the textual content, overlooking the joyful atmosphere conveyed through the image. In the second example, an intriguing observation is that the situation is the exact opposite of the previous case. Here, only Chimera correctly predicts the sentiment polarity of the specific target, "St. Bede" as "Neutral" whereas both GPT-4o and MDCA incorrectly classify it as "Positive". It is observed that the SR of GPT-4o and the RC of MDCA both convey a positive sentiment, largely due + +to an overinterpretation and extrapolation of the textual content. In contrast, Chimera demonstrates accurate prediction by appropriately integrating a balanced understanding of the image content and its aesthetic attributes. In the final example, both Chimera and GPT-4o accurately identify the sentiment polarity of "Michael Oher" as "Negative". MDCA's incorrect prediction of "Neutral" may be attributed to its generated RC and DC failing to account for the individual's expression, thereby overlooking critical semantic cues present in the visual content. With the aid of facial descriptions, Chimera effectively captures and aligns fine-grained emotional cues from visual content, enabling it to generate coherent SR and IR and achieve accurate predictions. The above representative instances further verify that incorporating cognitive and aesthetic sentiment causality enhances sentiment classification accuracy in MABSA. + +# 6 CONCLUSION + +In this paper, we propose a cognitive sentiment causality understanding framework tailored for multimodal aspect-based sentiment classification. The framework, which is novel in its approach, consists of four primary components: linguistic-aware semantic alignment, a translation module, rationale dataset construction, and rationale-aware learning. The linguistic-aware semantic alignment component facilitates visual patch-token level alignment through dynamic patch selection and semantic patch calibration. 
The translation module transforms holistic image and object-level visual information into corresponding emotion-laden textual representations. The rationale dataset construction involves designing refined prompts and leveraging LLMs to generate semantic and impression rationale. Finally, rationale-aware learning incorporates semantic explanations and affective-cognitive resonance to enhance the model's capacity to understand cognitive sentiment causality. Experimental results on three Twitter datasets demonstrate that the proposed Chimera achieves performance gains over SOTA baselines. + +# ACKNOWLEDGMENTS + +This research is supported by the Shanghai Science and Technology Innovation Action Plan (No. 24YF2710100), the Shanghai Special Project to Promote High-quality Industrial Development (No. RZ-CYAI-01-24-0288), the National Nature Science Foundation of China (No. 62477010), the Science and Technology Commission of Shanghai Municipality Grant (No. 22511105901, No. 21511100402), the Ministry of Education, Singapore under its MOE Academic Research Fund Tier 2 (STEM RIE2025 Award MOE-T2EP20123-0005) and by the RIE2025 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) (Award I2301E0026), administered by A\*STAR, as well as supported by Alibaba Group and NTU Singapore. + +# REFERENCES + +[1] R. Mao, Q. Liu, K. He, W. Li, and E. Cambria, "The biases of pretrained language models: An empirical study on prompt-based sentiment analysis and emotion detection," IEEE Transactions on Affective Computing, vol. 14, no. 3, pp. 1743-1753, 2023. + +[2] K. Du, F. Xing, R. Mao, and E. Cambria, "Financial sentiment analysis: Techniques and applications," ACM Computing Surveys, vol. 56, no. 9, pp. 1-42, 2024. +[3] R. Mao, M. Ge, S. Han, W. Li, K. He, L. Zhu, and E. Cambria, "A survey on pragmatic processing techniques," Information Fusion, vol. 114, p. 102712, 2025. +[4] L. Xiao, Y. Xue, H. Wang, X. Hu, D. Gu, and Y. 
Zhu, "Exploring fine-grained syntactic information for aspect-based sentiment classification with dual graph neural networks," Neurocomputing, vol. 471, pp. 48-59, 2022. +[5] Y. Ma, R. Mao, Q. Lin, P. Wu, and E. Cambria, "Quantitative stock portfolio optimization by multi-task learning risk and return," Information Fusion, vol. 104, p. 102165, 2024. +[6] K. Du, F. Xing, R. Mao, and E. Cambria, "FinSenticNet: A concept-level lexicon for financial sentiment analysis," in 2023 IEEE Symposium Series on Computational Intelligence (SSCI). IEEE, 2023, pp. 109-114. +[7] X. Zhang, R. Mao, and E. Cambria, "SenticVec: Toward robust and human-centric neurosymbolic sentiment analysis," in Findings of the Association for Computational Linguistics: ACL. Association for Computational Linguistics, 2024, pp. 4851-4863. +[8] S. Zhao, M. Jia, L. A. Tuan, F. Pan, and J. Wen, "Universal vulnerabilities in large language models: Backdoor attacks for incontext learning," in Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, 2024, pp. 11507-11522. +[9] L. Zhu, R. Mao, E. Cambria, and B. J. Jansen, "Neurosymbolic AI for personalized sentiment analysis," in Proceedings of International Conference on Human-Computer Interaction (HCII), 2024, pp. 269-290. +[10] S. Zhao, J. Wen, A. Luu, J. Zhao, and J. Fu, "Prompt as triggers for backdoor attack: Examining the vulnerability in language models," in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, 2023, pp. 12303-12317. +[11] J. YU and J. JIANG, "Adapting bert for target-oriented multimodal sentiment classification.(2019)," in Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, 2019, pp. 5408-5414. +[12] J. Yu, J. Jiang, and R. Xia, "Entity-sensitive attention and fusion network for entity-level multimodal sentiment classification," IEEE/ACM Transactions on Audio, Speech, and Language Processing, vol. 28, pp. 429-439, 2019. +[13] J. 
Yu, J. Wang, R. Xia, and J. Li, "Targeted multimodal sentiment classification based on coarse-to-fine grained image-target matching." in *IJCAI*, 2022, pp. 4482-4488. +[14] Y. Ling, J. Yu, and R. Xia, "Vision-language pre-training for multimodal aspect-based sentiment analysis," in Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2022, pp. 2149-2159. +[15] L. Yang, J.-C. Na, and J. Yu, "Cross-modal multitask transformer for end-to-end multimodal aspect-based sentiment analysis," Information Processing & Management, vol. 59, no. 5, p. 103038, 2022. +[16] R. Zhou, W. Guo, X. Liu, S. Yu, Y. Zhang, and X. Yuan, "Aom: Detecting aspect-oriented information for multimodal aspect-based sentiment analysis," in Findings of the Association for Computational Linguistics: ACL 2023, 2023, pp. 8184-8196. +[17] Z. Khan and Y. Fu, "Exploiting bert for multimodal target sentiment classification through input space translation," in Proceedings of the 29th ACM international conference on multimedia, 2021, pp. 3034-3042. +[18] L. Xiao, E. Zhou, X. Wu, S. Yang, T. Ma, and L. He, "Adaptive multi-feature extraction graph convolutional networks for multimodal target sentiment analysis," in 2022 IEEE International Conference on Multimedia and Expo (ICME). IEEE, 2022, pp. 1-6. +[19] L. Xiao, X. Wu, S. Yang, J. Xu, J. Zhou, and L. He, "Cross-modal fine-grained alignment and fusion network for multimodal aspect-based sentiment analysis," Information Processing & Management, vol. 60, no. 6, p. 103508, 2023. +[20] Y. Huang, Z. Chen, J. Chen, J. Z. Pan, Z. Yao, and W. Zhang, "Target-oriented sentiment classification with sequential cross-modal semantic graph," in International Conference on Artificial Neural Networks. Springer, 2023, pp. 587-599. +[21] Q. Wang, H. Xu, Z. Wen, B. Liang, M. Yang, B. Qin, and R. 
Xu, "Image-to-text conversion and aspect-oriented filtration for multimodal aspect-based sentiment analysis," IEEE Transactions on Affective Computing, 2023. +[22] L. Xiao, X. Wu, J. Xu, W. Li, C. Jin, and L. He, "Atlantis: Aesthetic-oriented multiple granularities fusion network for joint multi- + +modal aspect-based sentiment analysis," Information Fusion, vol. 106, p. 102304, 2024. +[23] H. Yang, Y. Zhao, and B. Qin, "Face-sensitive image-to-emotional-text cross-modal translation for multimodal aspect-based sentiment analysis," in Proceedings of the 2022 conference on empirical methods in natural language processing, 2022, pp. 3324-3335. +[24] R. Fan, T. He, M. Chen, M. Zhang, X. Tu, and M. Dong, "Dual causes generation assisted model for multimodal aspect-based sentiment classification," IEEE Transactions on Neural Networks and Learning Systems, 2024. +[25] J. Wang, Z. Li, J. Yu, L. Yang, and R. Xia, "Fine-grained multimodal named entity recognition and grounding with a generative framework," in Proceedings of the 31st ACM International Conference on Multimedia, 2023, pp. 3934-3943. +[26] X. Zhang, R. Mao, K. He, and E. Cambria, "Neurosymbolic sentiment analysis with dynamic word sense disambiguation," in Findings of the Association for Computational Linguistics: EMNLP 2023, Singapore, 2023, pp. 8772-8783. +[27] Q. Lu, X. Sun, Y. Long, Z. Gao, J. Feng, and T. Sun, "Sentiment analysis: Comprehensive reviews, recent advances, and open challenges," IEEE Transactions on Neural Networks and Learning Systems, 2023. +[28] H. Liu, W. Wang, and H. Li, "Interpretable multimodal misinformation detection with logic reasoning," in Findings of the Association for Computational Linguistics: ACL 2023, 2023, pp. 9781-9796. +[29] R. Mao, K. Du, Y. Ma, L. Zhu, and E. Cambria, "Discovering the cognition behind language: Financial metaphor analysis with MetaPro," in 2023 IEEE International Conference on Data Mining (ICDM). IEEE, 2023, pp. 1211-1216. +[30] E. Cambria, X. Zhang, R. 
Mao, M. Chen, and K. Kwok, "SenticNet 8: Fusing emotion AI and commonsense AI for interpretable, trustworthy, and explainable affective computing," in Proceedings of International Conference on Human-Computer Interaction (HCI), Washington DC, USA, 2024, pp. 197-216. +[31] K. Du, R. Mao, F. Xing, and E. Cambria, "Explainable stock price movement prediction using contrastive learning," in Proceedings of the 33rd ACM International Conference on Information and Knowledge Management (CIKM), Idaho, USA, 2024, pp. 529-537. +[32] H. Zhang, X. Zhou, Z. Shen, and Y. Li, "Privfr: Privacy-enhanced federated recommendation with shared hash embedding," IEEE Transactions on Neural Networks and Learning Systems, 2024. +[33] E. Yang, L. Shen, G. Guo, X. Wang, X. Cao, J. Zhang, and D. Tao, "Model merging in llms, mllms, and beyond: Methods, theories, applications and opportunities," arXiv preprint arXiv:2408.07666, 2024. +[34] L. Xiao, R. Mao, X. Zhang, L. He, and E. Cambria, "Vanessa: Visual connotation and aesthetic attributes understanding network for multimodal aspect-based sentiment analysis," in Findings of the Association for Computational Linguistics: EMNLP 2024, 2024, pp. 11486-11500. +[35] J. Kruk, J. Lubin, K. Sikka, X. Lin, D. Jurafsky, and A. Divakaran, "Integrating text and image: Determining multimodal document intent in instagram posts," in Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), 2019, pp. 4622-4632. +[36] H. Liu, W. Wang, and H. Li, "Towards multi-modal sarcasm detection via hierarchical congruity modeling with knowledge enhancement," in Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, 2022, pp. 4995-5006. +[37] R. Mao and X. 
Li, "Bridging towers of multi-task learning with a gating mechanism for aspect-based sentiment analysis and sequential metaphor identification," in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35, 2021, pp. 13534-13542. +[38] T. Yue, R. Mao, H. Wang, Z. Hu, and E. Cambria, "KnowleNet: Knowledge fusion network for multimodal sarcasm detection," Information Fusion, vol. 100, p. 101921, 2023. +[39] C. Fan, J. Lin, R. Mao, and E. Cambria, "Fusing pairwise modalities for emotion recognition in conversations," Information Fusion, vol. 106, p. 102306, 2024. +[40] L. Yang, Z. Wang, Z. Li, J.-C. Na, and J. Yu, "An empirical study of multimodal entity-based sentiment analysis with chatgpt: Improving in-context learning via entity-aware contrastive learning," Information Processing & Management, vol. 61, no. 4, p. 103724, 2024. +[41] L. Yang, J. Wang, J.-C. Na, and J. Yu, "Generating paraphrase sen + +tences for multimodal entity-category-sentiment triple extraction," Knowledge-Based Systems, vol. 278, p. 110823, 2023. +[42] J. Zhou, J. Zhao, J. X. Huang, Q. V. Hu, and L. He, "Masad: A large-scale dataset for multimodal aspect-based sentiment analysis," Neurocomputing, vol. 455, pp. 47-58, 2021. +[43] W. Zhang, X. Li, Y. Deng, L. Bing, and W. Lam, "A survey on aspect-based sentiment analysis: Tasks, methods, and challenges," IEEE Transactions on Knowledge and Data Engineering, vol. 35, no. 11, pp. 11019-11038, 2022. +[44] X. Ju, D. Zhang, R. Xiao, J. Li, S. Li, M. Zhang, and G. Zhou, "Joint multi-modal aspect-sentiment analysis with auxiliary cross-modal relation detection," in Proceedings of the 2021 conference on empirical methods in natural language processing, 2021, pp. 4395-4405. +[45] J. Mu, F. Nie, W. Wang, J. Xu, J. Zhang, and H. Liu, "Mocolnet: A momentum contrastive learning network for multimodal aspect-level sentiment analysis," IEEE Transactions on Knowledge and Data Engineering, 2023. +[46] F. Zhao, C. Li, Z. Wu, Y. Ouyang, J. 
Zhang, and X. Dai, "M2df: Multi-grained multi-curriculum denoising framework for multimodal aspect-based sentiment analysis," in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, 2023, pp. 9057-9070. +[47] E. Cambria, R. Mao, M. Chen, Z. Wang, and S.-B. Ho, "Seven pillars for the future of artificial intelligence," IEEE Intelligent Systems, vol. 38, no. 6, pp. 62-69, 2023. +[48] R. Arnheim, Art and visual perception: A psychology of the creative eye. Univ of California Press, 1954. +[49] V. S. Ramachandran and W. Hirstein, "The science of art: A neurological theory of aesthetic experience," Journal of Consciousness Studies, vol. 6, no. 6-7, pp. 15-51, 1999. +[50] H. Zeng, Z. Cao, L. Zhang, and A. C. Bovik, "A unified probabilistic formulation of image aesthetic assessment," IEEE Transactions on Image Processing, vol. 29, pp. 1548-1561, 2019. +[51] G. C. Cupchik and J. László, Emerging visions of the aesthetic process: In psychology, semiology, and philosophy. Cambridge University Press, 1992. +[52] X. Jin, L. Wu, G. Zhao, X. Li, X. Zhang, S. Ge, D. Zou, B. Zhou, and X. Zhou, "Aesthetic attributes assessment of images," in Proceedings of the 27th ACM international conference on multimedia, 2019, pp. 311-319. +[53] J. Ke, K. Ye, J. Yu, Y. Wu, P. Milanfar, and F. Yang, "Vila: Learning image aesthetics from user comments with vision-language pretraining," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 10041-10051. +[54] J. Kruk, C. Ziems, and D. Yang, "Impressions: Visual semiotics and aesthetic impact understanding," in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, 2023, pp. 12273-12291. +[55] R. Anil, A. M. Dai, O. First, M. Johnson, D. Lepikhin, A. Passos, S. Shakeri, E. Taropa, P. Bailey, Z. Chen et al., "Palm 2 technical report," arXiv preprint arXiv:2305.10403, 2023. +[56] R. Mao, G. Chen, X. Zhang, F. Guerin, and E. 
Cambria, "GPTEval: A survey on assessments of ChatGPT and GPT-4," in Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024). ELRA and ICCL, 2024, pp. 7844-7866. +[57] S. Zhao, L. A. Tuan, J. Fu, J. Wen, and W. Luo, "Exploring clean label backdoor attacks and defense in language models," IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2024. +[58] S. Zhao, X. Xu, L. Xiao, J. Wen, and L. A. Tuan, "Clean-label backdoor attack and defense: An examination of language model vulnerability," Expert Systems with Applications, vol. 265, p. 125856, 2025. +[59] J. Achiam, S. Adler, S. Agarwal, L. Ahmad, I. Akkaya, F. L. Aleman, D. Almeida, J. Altenschmidt, S. Altman, S. Anadkat et al., "Gpt-4 technical report," arXiv preprint arXiv:2303.08774, 2023. +[60] G. Team, R. Anil, S. Borgeaud, J.-B. Alayrac, J. Yu, R. Soricut, J. Schalkwyk, A. M. Dai, A. Hauth, K. Millican et al., "Gemini: a family of highly capable multimodal models," arXiv preprint arXiv:2312.11805, 2023. +[61] H. Touvron, L. Martin, K. Stone, P. Albert, A. Almahairi, Y. Babaei, N. Bashlykov, S. Batra, P. Bhargava, S. Bhosale et al., "Llama 2: Open foundation and fine-tuned chat models," arXiv preprint arXiv:2307.09288, 2023. + +[62] H. Liu, W. Wang, H. Sun, A. Rocha, and H. Li, "Robust domain misinformation detection via multi-modal feature alignment," IEEE Transactions on Information Forensics and Security, 2023. +[63] R. Mao, K. He, C. Ong, Q. Liu, and E. Cambria, “Metapro 2.0: Computational metaphor processing on the effectiveness of anomalous language modeling,” in Findings of the Association for Computational Linguistics ACL 2024, 2024, pp. 9891–9908. +[64] Z. Tan, D. Li, S. Wang, A. Beigi, B. Jiang, A. Bhattacharjee, M. Karami, J. Li, L. Cheng, and H. Liu, "Large language models for data annotation: A survey," arXiv preprint arXiv:2402.13446, 2024. +[65] R. Mao, G. Chen, X. Li, M. Ge, and E. 
Cambria, "A comparative analysis of metaphorical cognition in chatgpt and human minds," Cognitive Computation, vol. 17, no. 1, p. 35, 2025. +[66] Y. Jia, X. Wu, H. Li, Q. Zhang, Y. Hu, S. Zhao, and W. Fan, "Uni-retrieval: A multi-style retrieval framework for stem's education," arXiv preprint arXiv:2502.05863, 2025. +[67] J. Wei, X. Wang, D. Schuurmans, M. Bosma, F. Xia, E. Chi, Q. V. Le, D. Zhou et al., "Chain-of-thought prompting elicits reasoning in large language models," Advances in neural information processing systems, vol. 35, pp. 24824-24837, 2022. +[68] K. Cobbe, V. Kosaraju, M. Bavarian, M. Chen, H. Jun, L. Kaiser, M. Plappert, J. Tworek, J. Hilton, R. Nakano et al., "Training verifiers to solve math word problems," arXiv preprint arXiv:2110.14168, 2021. +[69] P. Wang, A. Chan, F. Ilievski, M. Chen, and X. Ren, "Pinto: Faithful language reasoning using prompt-generated rationales," in The Eleventh International Conference on Learning Representations, 2023. +[70] P. Wang, Z. Wang, Z. Li, Y. Gao, B. Yin, and X. Ren, "Scott: Self-consistent chain-of-thought distillation," in Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2023, pp. 5546-5558. +[71] H. Liu, Z. Teng, L. Cui, C. Zhang, Q. Zhou, and Y. Zhang, "Logicot: Logical chain-of-thought instruction tuning," in The 2023 Conference on Empirical Methods in Natural Language Processing, 2023. +[72] M. Kang, S. Lee, J. Baek, K. Kawaguchi, and S. J. Hwang, "Knowledge-augmented reasoning distillation for small language models in knowledge-intensive tasks," Advances in Neural Information Processing Systems, vol. 36, 2024. +[73] Y. Li, A. Dao, W. Bao, Z. Tan, T. Chen, H. Liu, and Y. Kong, "Facial affective behavior analysis with instruction tuning," in European Conference on Computer Vision. Springer, 2025, pp. 165-186. +[74] J. Guo, J. Deng, A. Lattas, and S. 
Zafeiriou, "Sample and computation redistribution for efficient face detection," in International Conference on Learning Representations, 2021. +[75] S. Wegreffer, J. Hessel, S. Swayamdipta, M. Riedl, and Y. Choi, "Reframing human-ai collaboration for generating free-text explanations," in Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 2022, pp. 632–658. +[76] L. Meng, H. Li, B.-C. Chen, S. Lan, Z. Wu, Y.-G. Jiang, and S.-N. Lim, "Adavit: Adaptive vision transformers for efficient image recognition," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 12309-12318. +[77] Z. Fu, L. Zhang, H. Xia, and Z. Mao, "Linguistic-aware patch slimming framework for fine-grained cross-modal alignment," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 26307-26316. +[78] C. Maddison, A. Mnih, and Y. Teh, "The concrete distribution: A continuous relaxation of discrete random variables," in Proceedings of the international conference on Learning Representations. International Conference on Learning Representations, 2017. +[79] Z. Zong, K. Li, G. Song, Y. Wang, Y. Qiao, B. Leng, and Y. Liu, "Self-slimmed vision transformer," in European Conference on Computer Vision. Springer, 2022, pp. 432-448. +[80] F. Faghri, D. J. Fleet, J. R. Kiros, and S. Fidler, "Vse++: Improving visual-semantic embeddings with hard negatives," arXiv preprint arXiv:1707.05612, 2017. +[81] L. Yang, J. Yu, C. Zhang, and J.-C. Na, "Fine-grained sentiment analysis of political tweets with entity-aware multimodal network," in Diversity, Divergence, Dialogue: 16th International Conference, iConference 2021, Beijing, China, March 17–31, 2021, Proceedings, Part I 16. Springer, 2021, pp. 411–420. +[82] H. W. Chung, L. Hou, S. Longpre, B. Zoph, Y. Tay, W. Fedus, Y. Li, X. Wang, M. Dehghani, S. 
Brahma et al., "Scaling instructionfinetuned language models," Journal of Machine Learning Research, vol. 25, no. 70, pp. 1-53, 2024. + +[83] I. Loshchilov, "Decoupled weight decay regularization," arXiv preprint arXiv:1711.05101, 2017. +[84] K. He, X. Zhang, S. Ren, and J. Sun, "Deep residual learning for image recognition," in Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 770-778. +[85] D. Tang, B. Qin, and T. Liu, "Aspect level sentiment classification with deep memory network," in Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, 2016, pp. 214-224. +[86] F. Fan, Y. Feng, and D. Zhao, "Multi-grained attention network for aspect-level sentiment classification," in Proceedings of the 2018 conference on empirical methods in natural language processing, 2018, pp. 3433-3442. +[87] J. D. M.-W. C. Kenton and L. K. Toutanova, "Bert: Pre-training of deep bidirectional transformers for language understanding," in Proceedings of naacL-HLT, vol. 1. Minneapolis, Minnesota, 2019, p. 2. +[88] N. Xu, W. Mao, and G. Chen, "Multi-interactive memory network for aspect based multimodal sentiment analysis," in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 33, 2019, pp. 371-378. +[89] J. Yu, K. Chen, and R. Xia, "Hierarchical interactive multimodal transformer for aspect-based multimodal sentiment analysis," IEEE Transactions on Affective Computing, vol. 14, no. 3, pp. 1966-1978, 2022. +[90] D. Liu, L. Li, X. Tao, J. Cui, and Q. Xie, "Descriptive prompt paraphrasing for target-oriented multimodal sentiment classification," in Findings of the Association for Computational Linguistics: EMNLP 2023, 2023, pp. 4174-4186. +[91] B. Yang and J. Li, "Visual elements mining as prompts for instruction learning for target-oriented multimodal sentiment classification," in Findings of the Association for Computational Linguistics: EMNLP 2023, 2023, pp. 6062-6075. +[92] J. Camacho-Collados, K. 
Rezaee, T. Riahi, A. Ushio, D. Loureiro, D. Antypas, J. Boisson, L. E. Anke, F. Liu, and E. Martinez-Camara, "Tweetnlp: Cutting-edge natural language processing for social media," in Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, 2022, pp. 38-49. +[93] J. Ye, J. Zhou, J. Tian, R. Wang, Q. Zhang, T. Gui, and X.-J. Huang, "Rethinkingtmsc: An empirical study for target-oriented multimodal sentiment classification," in Findings of the Association for Computational Linguistics: EMNLP 2023, 2023, pp. 270-277. +[94] M. Ivanova and S. French, The aesthetics of science: beauty, imagination and understanding. Routledge, 2020. + +![](images/b2455a823d706a1fb297782d965eb4fc8120cd085b7a9a145704ebd539f3a434.jpg) + +Luwei Xiao is currently pursuing his Ph.D. degree in the School of Computer Science and Technology at East China Normal University, Shanghai, China, under the supervision of Prof. Liang He. He is presently conducting an academic visit to the College of Computing and Data Science at Nanyang Technological University, Singapore, under the supervision of Prof. Erik Cambria, with funding support from the China Scholarship Council (CSC). His research interests encompass multimodal learning, semi- + +ment analysis, and image aesthetic assessment. + +![](images/590aa9d63be2eebe6cfa3158c9b043df2309dd5b1ad1248286a4339f6c939ac7.jpg) + +Rui Mao is a Research Scientist and Lead Investigator at Nanyang Technological University. He obtained his Ph.D. degree in Computing Science from the University of Aberdeen. His research interest lies in NLP, cognitive computing, and their applications in finance and cognitive science. He and his funded company (Ruimao Tech) have developed an end-to-end system (MetaPro) for computational metaphor processing and a neural search engine (wensousou.com) for searching Chinese ancient po + +ems with modern language. 
He served as Area Chair in COLING and EMNLP and Associate Editor in IEEE Transactions on Affective Computing, Expert Systems, Information Fusion and Neurocomputing. Contact him at rui.mao@ntu.edu.sg. + +![](images/c68014cf5101a6d4a08998285ee4b085ef6bfd6d34aee5cfdc345bad0334cc9f.jpg) +tacks. + +Shuai Zhao obtained his Ph.D. degree from Jinan University in 2024. He spent one year as a visiting student and six months as a research assistant at the School of Computer Science and Engineering, Nanyang Technological University. He is now a Postdoctoral Researcher at the College of Computing and Data Science, Nanyang Technological University. His current research interests include deep learning and natural language processing for code generation, summary generation, text classification and backdoor at + +![](images/408bcb009bb136bc756c3feeeae37046041ca6f53f35e217e619ad31c595a06a.jpg) + +Qika Lin received his Ph.D. degree at Xi'an Jiaotong University. Currently, he is a Research Fellow at the National University of Singapore. His research interests include natural language processing, knowledge reasoning, and multimodal learning. He has published papers in top-tier journals/conferences, including TKDE, ACL, SIGIR, KDD, ICDE, and IJCAI. He has actively contributed to several journals/conferences as a reviewer or PC member, including TPAMI, IJCV, TKDE, TMC, TNNLS, NeurIPS, ICLR, SIGIR, + +ACL, and EMNLP. He also served as a Guest Editor of IEEE TCSS and Information Fusion. + +![](images/4781794a3325adb4297715bb796cb377152ae80fbf054d40217e4712f2292d98.jpg) + +Yanhao Jia is a phd student at Nanyang Technological University. He obtained his bechealor degree in Computing Science from Shandong University. He has published over seven conference/journal papers on ECCV/NeurIPS/IEEE Trans on nuclear science and been the reviewer for ACM MM and ECCV. 
+ +![](images/055ce189bbd92010fabe2e02ed9ab7e4fe8376936fe577fb386be48061eb9c9e.jpg) + +Liang He received his PhD degree from the Department of Computer Science and Technology, East China Normal University, China. He is now a professor and the Vice Dean of the School of Computer Science and Technology, East China Normal University. His current research interest includes Natural Language Processing, Knowledge Processing, and Human in the Loop for Decision-making. + +![](images/abc09142c4768e0fb2c6c4106a4e36deaa52ea49863b3bb59a2135f5bffcfe98.jpg) + +Erik Cambria is a Professor at Nanyang Technological University, where he also holds the appointment of Provost Chair in Computer Science and Engineering, and Founder of several AI companies, such as SenticNet, offering B2B sentiment analysis services, and finaXai, providing fully explainable financial insights. His research focuses on neurosymbolic AI for interpretable, trustworthy, and explainable affective computing in domains like social media monitoring, financial forecasting, and AI for social + +good. He is an IEEE Fellow, Associate Editor of various top-tier AI journals, e.g., Information Fusion and IEEE Transactions on Affective Computing, and is involved in several international conferences as keynote speaker, program chair and committee member. Contact him at cambria@ntu.edu.sg. 
\ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15848/images/050be5835958eb54c08c32fca9f8350415aab027b5d014443ebe81b11f308c55.jpg b/data/2025/2504_15xxx/2504.15848/images/050be5835958eb54c08c32fca9f8350415aab027b5d014443ebe81b11f308c55.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87c9ec303b4e53751a017e9bb1044172e9797d06 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/050be5835958eb54c08c32fca9f8350415aab027b5d014443ebe81b11f308c55.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:010dd8495d19ec78d4bacc8edd7644c7d8c859336e6a6f010a5fda7aa59b48bb +size 17313 diff --git a/data/2025/2504_15xxx/2504.15848/images/055ce189bbd92010fabe2e02ed9ab7e4fe8376936fe577fb386be48061eb9c9e.jpg b/data/2025/2504_15xxx/2504.15848/images/055ce189bbd92010fabe2e02ed9ab7e4fe8376936fe577fb386be48061eb9c9e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d3ab7ffe25a2ca5a5f808fbee6e13f2b25d86557 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/055ce189bbd92010fabe2e02ed9ab7e4fe8376936fe577fb386be48061eb9c9e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1e38bf11ce692597ffdbf4d8de5ad95aee40d070d9f192489360147e1194d32 +size 5460 diff --git a/data/2025/2504_15xxx/2504.15848/images/08133c089ed25791aec10225277c395d31439bd5d26d91d73a8056da7ca18d6f.jpg b/data/2025/2504_15xxx/2504.15848/images/08133c089ed25791aec10225277c395d31439bd5d26d91d73a8056da7ca18d6f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..331f841b47d2ffca1cefd3d5938042876f17fa01 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/08133c089ed25791aec10225277c395d31439bd5d26d91d73a8056da7ca18d6f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab72f42597f1d8f6339c62265471faad57618fc0bdefe23bf8d21f4860df1938 +size 19268 diff --git 
a/data/2025/2504_15xxx/2504.15848/images/1b73e9a868e9432fecdd3f367dde5505d76e4a2a2988557afd4f0a3186eaaae7.jpg b/data/2025/2504_15xxx/2504.15848/images/1b73e9a868e9432fecdd3f367dde5505d76e4a2a2988557afd4f0a3186eaaae7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8e2c0bb465607d911d2a282d5859faffe0be8a5c --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/1b73e9a868e9432fecdd3f367dde5505d76e4a2a2988557afd4f0a3186eaaae7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca7eef6a2cb597ddf1c4a25e7bc10922f1927c02c237f76f8842a08783b96d31 +size 4590 diff --git a/data/2025/2504_15xxx/2504.15848/images/27ab10d2e20a003a634285c4bb0ee585657ffcd96585edb897c6af55a29b1391.jpg b/data/2025/2504_15xxx/2504.15848/images/27ab10d2e20a003a634285c4bb0ee585657ffcd96585edb897c6af55a29b1391.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1f168d500c399a671967056ca1bd271860cd7585 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/27ab10d2e20a003a634285c4bb0ee585657ffcd96585edb897c6af55a29b1391.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18c15405224c0dc99589e37a109ea32fc9391cc60e4affc683dcecdcbacaec15 +size 5602 diff --git a/data/2025/2504_15xxx/2504.15848/images/2bca49dd24d43fc768b5a0cdf5379321f0e4de3e014e9c8946bdc65af1ffdd8d.jpg b/data/2025/2504_15xxx/2504.15848/images/2bca49dd24d43fc768b5a0cdf5379321f0e4de3e014e9c8946bdc65af1ffdd8d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..73608aa18ba55afc4eaa1f2c2f5be6f43cb3140b --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/2bca49dd24d43fc768b5a0cdf5379321f0e4de3e014e9c8946bdc65af1ffdd8d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fefa0e564b8acd7f4816457db94586bd7882f98ce2a45d78c2502ad392ca864d +size 9877 diff --git a/data/2025/2504_15xxx/2504.15848/images/34127dfea685adba1f893830d4ba483e2b00a788a55d69e6eaf849f2801ac0fa.jpg 
b/data/2025/2504_15xxx/2504.15848/images/34127dfea685adba1f893830d4ba483e2b00a788a55d69e6eaf849f2801ac0fa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a5bf9a3076eb9636d509ba94d843ad15f95a1d20 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/34127dfea685adba1f893830d4ba483e2b00a788a55d69e6eaf849f2801ac0fa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae1ccc1e8270b21914c78f7478d29112ba6826f453e69e4c8649c056d86aa373 +size 3352 diff --git a/data/2025/2504_15xxx/2504.15848/images/3446df51ba868913d3f997aeafdb9b807e000ab1914dcdc123d1ab6bca2ee06f.jpg b/data/2025/2504_15xxx/2504.15848/images/3446df51ba868913d3f997aeafdb9b807e000ab1914dcdc123d1ab6bca2ee06f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c86339ad235181ff77221104a20a12b694c5a9dd --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/3446df51ba868913d3f997aeafdb9b807e000ab1914dcdc123d1ab6bca2ee06f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dbad8ba528ea5d84fa99de63d576e565a98d9c7cdde6c9425ecd47304dad80c1 +size 6676 diff --git a/data/2025/2504_15xxx/2504.15848/images/363d59fd750d7001035634b09406a37e863cc3cb442efb4b51e2b8ec1dbc8857.jpg b/data/2025/2504_15xxx/2504.15848/images/363d59fd750d7001035634b09406a37e863cc3cb442efb4b51e2b8ec1dbc8857.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb36e852d1faae297f98599ce9fc18816ba5115f --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/363d59fd750d7001035634b09406a37e863cc3cb442efb4b51e2b8ec1dbc8857.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe972217df7256c612ea22f1ee968460984a5af4e28f6bffc322f2995f57b61f +size 5010 diff --git a/data/2025/2504_15xxx/2504.15848/images/393ca141cdf3ad35aa86d194b22a9471891c0a558d4751aa6909446c174e5b0a.jpg b/data/2025/2504_15xxx/2504.15848/images/393ca141cdf3ad35aa86d194b22a9471891c0a558d4751aa6909446c174e5b0a.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..03af369e88b3ee22b0e84488c1d29fa75f0c81cf --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/393ca141cdf3ad35aa86d194b22a9471891c0a558d4751aa6909446c174e5b0a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c85a3fc67c5d9e58887a959ac981c43927bfe8f4af2a10f7b1e6f1fc5357b100 +size 43632 diff --git a/data/2025/2504_15xxx/2504.15848/images/3acadf58ed5ee68a3128615d89f084eeca07b2f9bf82d1cd486f6dc56fa12528.jpg b/data/2025/2504_15xxx/2504.15848/images/3acadf58ed5ee68a3128615d89f084eeca07b2f9bf82d1cd486f6dc56fa12528.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c3471413af1721e0eb9ee8fc2fca0d03e8f4bad3 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/3acadf58ed5ee68a3128615d89f084eeca07b2f9bf82d1cd486f6dc56fa12528.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6469d9d4a44587a08efbd3fb8c8e6d47e50aa93041fbd1d24d3d569a769f4332 +size 47920 diff --git a/data/2025/2504_15xxx/2504.15848/images/3b4e213ad8da186c26e88ec593995e95a76be8a8184aef3a371646cd07e7d8ae.jpg b/data/2025/2504_15xxx/2504.15848/images/3b4e213ad8da186c26e88ec593995e95a76be8a8184aef3a371646cd07e7d8ae.jpg new file mode 100644 index 0000000000000000000000000000000000000000..32a4b7a1f5e7d9083314733bf48d4da66aee440f --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/3b4e213ad8da186c26e88ec593995e95a76be8a8184aef3a371646cd07e7d8ae.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d9d26222be91decb4aefb0c9cf40b6d1411dddf74fde3da0231cde7a6547c67 +size 7583 diff --git a/data/2025/2504_15xxx/2504.15848/images/3beda8342affcae39beb96594ec67f80b0c0dc46803261013741b2be59f83ad0.jpg b/data/2025/2504_15xxx/2504.15848/images/3beda8342affcae39beb96594ec67f80b0c0dc46803261013741b2be59f83ad0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b717c0b9b45a86b5a38b2900ff9ae5abb38f5380 --- /dev/null +++ 
b/data/2025/2504_15xxx/2504.15848/images/3beda8342affcae39beb96594ec67f80b0c0dc46803261013741b2be59f83ad0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f256facbcb9173f645936a19337c24534b5fa37f6d832dec16e573cb0980449 +size 6123 diff --git a/data/2025/2504_15xxx/2504.15848/images/408bcb009bb136bc756c3feeeae37046041ca6f53f35e217e619ad31c595a06a.jpg b/data/2025/2504_15xxx/2504.15848/images/408bcb009bb136bc756c3feeeae37046041ca6f53f35e217e619ad31c595a06a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c55e5514660be5b7662e02fd0f55e487d82befea --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/408bcb009bb136bc756c3feeeae37046041ca6f53f35e217e619ad31c595a06a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a47e958d04f18715da5c5e56f64e2dad9534bff4208b818c8422d25e0d6abf07 +size 9631 diff --git a/data/2025/2504_15xxx/2504.15848/images/41df244fe389a53760c5227fe3f128250aad87fd1cfc796344b3c7c986c79c10.jpg b/data/2025/2504_15xxx/2504.15848/images/41df244fe389a53760c5227fe3f128250aad87fd1cfc796344b3c7c986c79c10.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5ebee54df41bcd43cb32cec7c3302fbee5274d09 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/41df244fe389a53760c5227fe3f128250aad87fd1cfc796344b3c7c986c79c10.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0fa983c7300601423127616c581311a9045335857fa223b9b12ce118a1733137 +size 4771 diff --git a/data/2025/2504_15xxx/2504.15848/images/4781794a3325adb4297715bb796cb377152ae80fbf054d40217e4712f2292d98.jpg b/data/2025/2504_15xxx/2504.15848/images/4781794a3325adb4297715bb796cb377152ae80fbf054d40217e4712f2292d98.jpg new file mode 100644 index 0000000000000000000000000000000000000000..41ed24340460b3c46a5fe95258070ffc2222143f --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/4781794a3325adb4297715bb796cb377152ae80fbf054d40217e4712f2292d98.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:a9cd1525982089f1be613b31c158f7e1386010591e40365733b41843f605fed5 +size 12298 diff --git a/data/2025/2504_15xxx/2504.15848/images/4da63db8ee69d0dc75759d822365f0103e1750ba58561bdf4257661e7d41d2c8.jpg b/data/2025/2504_15xxx/2504.15848/images/4da63db8ee69d0dc75759d822365f0103e1750ba58561bdf4257661e7d41d2c8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..00eac22801d5b083db67674d64feb974ecd51147 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/4da63db8ee69d0dc75759d822365f0103e1750ba58561bdf4257661e7d41d2c8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed0e80e7e23548acc7ba0ce1a198cfdeaa550d0335dfd89dfa4a25935c6ded8a +size 21736 diff --git a/data/2025/2504_15xxx/2504.15848/images/590aa9d63be2eebe6cfa3158c9b043df2309dd5b1ad1248286a4339f6c939ac7.jpg b/data/2025/2504_15xxx/2504.15848/images/590aa9d63be2eebe6cfa3158c9b043df2309dd5b1ad1248286a4339f6c939ac7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e2be850088c04461ed9d7bcb171a23adff51368d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/590aa9d63be2eebe6cfa3158c9b043df2309dd5b1ad1248286a4339f6c939ac7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d21fa937b00336a2d220729a9c3176580edc2d8a5cd7d5593fb0c7c1d67497d3 +size 8340 diff --git a/data/2025/2504_15xxx/2504.15848/images/5acc1392ca7e83c57baaccf4589d6ac105a595c6d177ff1ca2d628f3c06f7d50.jpg b/data/2025/2504_15xxx/2504.15848/images/5acc1392ca7e83c57baaccf4589d6ac105a595c6d177ff1ca2d628f3c06f7d50.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2d2c4da1abc3cbe7770ead2c8bd8baf142dfadc6 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/5acc1392ca7e83c57baaccf4589d6ac105a595c6d177ff1ca2d628f3c06f7d50.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82494145a7f84b49b174b422fba717534bea81a41cab7e10f6da6c5484dd6048 +size 4635 diff --git 
a/data/2025/2504_15xxx/2504.15848/images/5ec5f7b35d7fcc566dac4feb24d838fc7b43767d47b5c05ad3840c83e4fb7886.jpg b/data/2025/2504_15xxx/2504.15848/images/5ec5f7b35d7fcc566dac4feb24d838fc7b43767d47b5c05ad3840c83e4fb7886.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c4189dee040365a478135764a159eb85299a9f57 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/5ec5f7b35d7fcc566dac4feb24d838fc7b43767d47b5c05ad3840c83e4fb7886.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0729beed0925257bb1040ea361f785fce619058c17b3e620132b8de3c5f2f9b1 +size 6112 diff --git a/data/2025/2504_15xxx/2504.15848/images/6f0965427bb1d8fa056516e4f55ca9a310014c9c0542e0231438d95c04190203.jpg b/data/2025/2504_15xxx/2504.15848/images/6f0965427bb1d8fa056516e4f55ca9a310014c9c0542e0231438d95c04190203.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0a7e403757fccb6bd96c838b199dff4397c0a2a6 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/6f0965427bb1d8fa056516e4f55ca9a310014c9c0542e0231438d95c04190203.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad3bccfc1847d07c2ba55000cbe6f11e9cbac1a00883fbfe1654787a4b5d260d +size 5172 diff --git a/data/2025/2504_15xxx/2504.15848/images/703550cb2e9d3a9aa1f9ae05fed89527e34c05dc0ccac49b3b4ddcc077b7558f.jpg b/data/2025/2504_15xxx/2504.15848/images/703550cb2e9d3a9aa1f9ae05fed89527e34c05dc0ccac49b3b4ddcc077b7558f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4d7f0da1264eb526f11a03b855a81fb3aefa17ae --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/703550cb2e9d3a9aa1f9ae05fed89527e34c05dc0ccac49b3b4ddcc077b7558f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d950f6f842abefc3ffbd004d0b3cfbfd9e8bb44ed3202ce5293a35fc8448e265 +size 121118 diff --git a/data/2025/2504_15xxx/2504.15848/images/721f8b1ab55a13d424546f2c4625788b66bd13787b6b453dfc2177c5cb573b32.jpg 
b/data/2025/2504_15xxx/2504.15848/images/721f8b1ab55a13d424546f2c4625788b66bd13787b6b453dfc2177c5cb573b32.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9a6907dbe792f8fa74fc4ccadd45151dbf21a3ce --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/721f8b1ab55a13d424546f2c4625788b66bd13787b6b453dfc2177c5cb573b32.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:007edc35f6158756e14fbe8ee470952db67beedbb033623e8b57f28f13e752ce +size 5096 diff --git a/data/2025/2504_15xxx/2504.15848/images/76169821fecec912e1e9076d4013b612fbcbf9e394366e9d11fb88f543258d70.jpg b/data/2025/2504_15xxx/2504.15848/images/76169821fecec912e1e9076d4013b612fbcbf9e394366e9d11fb88f543258d70.jpg new file mode 100644 index 0000000000000000000000000000000000000000..623d3e03b3bb650ee6e2cee4d49cf763689a32a0 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/76169821fecec912e1e9076d4013b612fbcbf9e394366e9d11fb88f543258d70.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:042c231e706dcf48953bd103b826f4a095835f8ea527ccf12e0a219b98222dcc +size 433011 diff --git a/data/2025/2504_15xxx/2504.15848/images/774a10219b19470c8fdfff9b2a0529c3851b267b0655c78fe280b8d6df1757fd.jpg b/data/2025/2504_15xxx/2504.15848/images/774a10219b19470c8fdfff9b2a0529c3851b267b0655c78fe280b8d6df1757fd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a951d3b44b8ac4b592555506b7f4658a173cdb62 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/774a10219b19470c8fdfff9b2a0529c3851b267b0655c78fe280b8d6df1757fd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba6a65640e17473e90ab1a02a23f06dd8bcbd90dd5680d5b30466af1745d71cc +size 4094 diff --git a/data/2025/2504_15xxx/2504.15848/images/8153f2c7b7698240bccfa3072675717b491e1e93395137231384633f5325fe8a.jpg b/data/2025/2504_15xxx/2504.15848/images/8153f2c7b7698240bccfa3072675717b491e1e93395137231384633f5325fe8a.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..d9b573a45a910e5fb85aa865b30ca8e675531040 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/8153f2c7b7698240bccfa3072675717b491e1e93395137231384633f5325fe8a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cdefc36c9157e11aff25f3e9164d599375765656d1e78d0dc1e492e3c7e2f4c6 +size 9213 diff --git a/data/2025/2504_15xxx/2504.15848/images/8574a8298aaec88834cdef610089c43a8221f47fe50653661c4c37844e56958f.jpg b/data/2025/2504_15xxx/2504.15848/images/8574a8298aaec88834cdef610089c43a8221f47fe50653661c4c37844e56958f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5fd86d2a47711ca74f197f6dc1c12077ace150f2 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/8574a8298aaec88834cdef610089c43a8221f47fe50653661c4c37844e56958f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1aaad7bb4385d80e40823ca2c9f397082059f508d431a85328268de69c4fe5a3 +size 4662 diff --git a/data/2025/2504_15xxx/2504.15848/images/862dcad0b826d0cd5df4ca36a57269b05df71ee3565aaae7bcebc5ed440fcd2d.jpg b/data/2025/2504_15xxx/2504.15848/images/862dcad0b826d0cd5df4ca36a57269b05df71ee3565aaae7bcebc5ed440fcd2d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6483b23b154ee84f778fdfb8d3cd78cc5f1971de --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/862dcad0b826d0cd5df4ca36a57269b05df71ee3565aaae7bcebc5ed440fcd2d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6226f2e15e49fd4ce253d370e78bdaa5df41e7041eae7928895ad81a6a9b0186 +size 93080 diff --git a/data/2025/2504_15xxx/2504.15848/images/9a52e7f0d93ba6fe3d0396f07a292739097a1fad75755af749ec8dd06624ef3c.jpg b/data/2025/2504_15xxx/2504.15848/images/9a52e7f0d93ba6fe3d0396f07a292739097a1fad75755af749ec8dd06624ef3c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3cd5b147963cce4b9554f65005e4cf65dd867bee --- /dev/null +++ 
b/data/2025/2504_15xxx/2504.15848/images/9a52e7f0d93ba6fe3d0396f07a292739097a1fad75755af749ec8dd06624ef3c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b1af9f4d69884fd750b0db1fa38f43e53482068274809281fc9ffc34111054d +size 171444 diff --git a/data/2025/2504_15xxx/2504.15848/images/a549ce47bc2931027c87290c6e6c5b42f07ed49211e30ffcf511804b3afa6c17.jpg b/data/2025/2504_15xxx/2504.15848/images/a549ce47bc2931027c87290c6e6c5b42f07ed49211e30ffcf511804b3afa6c17.jpg new file mode 100644 index 0000000000000000000000000000000000000000..506db991daa8dada3098ac7c18121f9a74d1521d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/a549ce47bc2931027c87290c6e6c5b42f07ed49211e30ffcf511804b3afa6c17.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd8573c8fc97befd0256f38b4fd529b5b2fac63c053f83341daa550204a67a53 +size 8370 diff --git a/data/2025/2504_15xxx/2504.15848/images/abc09142c4768e0fb2c6c4106a4e36deaa52ea49863b3bb59a2135f5bffcfe98.jpg b/data/2025/2504_15xxx/2504.15848/images/abc09142c4768e0fb2c6c4106a4e36deaa52ea49863b3bb59a2135f5bffcfe98.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f90e8f2e09108b98b235068ef5712e4b2558ada --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/abc09142c4768e0fb2c6c4106a4e36deaa52ea49863b3bb59a2135f5bffcfe98.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:782b7700cba7ade2d0ba7dcf72889119739cbcd1a0497895b57d056f858b82a4 +size 13370 diff --git a/data/2025/2504_15xxx/2504.15848/images/b2455a823d706a1fb297782d965eb4fc8120cd085b7a9a145704ebd539f3a434.jpg b/data/2025/2504_15xxx/2504.15848/images/b2455a823d706a1fb297782d965eb4fc8120cd085b7a9a145704ebd539f3a434.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fac40e9ba9b751b312f6a861f7befa2399d2f045 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/b2455a823d706a1fb297782d965eb4fc8120cd085b7a9a145704ebd539f3a434.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:0949ce7845df3078c96b0b9628626bc220a4d611613a724ffd8c1c009e2c455f +size 6784 diff --git a/data/2025/2504_15xxx/2504.15848/images/b345948b6fc756b2505bb224ee746b6e0583cff242cc6fb931a837f9fd22f931.jpg b/data/2025/2504_15xxx/2504.15848/images/b345948b6fc756b2505bb224ee746b6e0583cff242cc6fb931a837f9fd22f931.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e596d070df796eb8ea50c9209c3288cf4c9ae54c --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/b345948b6fc756b2505bb224ee746b6e0583cff242cc6fb931a837f9fd22f931.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:973eedacb7da888aa276d40ffc1b27ddabe3d42f671eeb8e2d0ba515d46266e1 +size 86050 diff --git a/data/2025/2504_15xxx/2504.15848/images/bafa5c4b0108ccda78786190442994f1286a5fa457afc2bc498ac933f7e041e6.jpg b/data/2025/2504_15xxx/2504.15848/images/bafa5c4b0108ccda78786190442994f1286a5fa457afc2bc498ac933f7e041e6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..07c3ed954e9f203d517ff250b4d6a123be0d55bc --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/bafa5c4b0108ccda78786190442994f1286a5fa457afc2bc498ac933f7e041e6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d041c13019339eaf93b9fd9de7277b21ec2e18b8a101590330a947d43ad7be7f +size 9401 diff --git a/data/2025/2504_15xxx/2504.15848/images/be9a387bddeba4b58e166a6984c8c5b0867e9a4c5e45612fe0aa5451666328af.jpg b/data/2025/2504_15xxx/2504.15848/images/be9a387bddeba4b58e166a6984c8c5b0867e9a4c5e45612fe0aa5451666328af.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6e926298f7b967ca2cbe07868d8df22eea91dd3f --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/be9a387bddeba4b58e166a6984c8c5b0867e9a4c5e45612fe0aa5451666328af.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c1946e107e75811329aa3766d6f1c448dcc10a3372af951cc892b288cc18280 +size 17147 diff --git 
a/data/2025/2504_15xxx/2504.15848/images/c3a55e0473720311e10e2d4c402da74c36c834e399f554389210047f51a67319.jpg b/data/2025/2504_15xxx/2504.15848/images/c3a55e0473720311e10e2d4c402da74c36c834e399f554389210047f51a67319.jpg new file mode 100644 index 0000000000000000000000000000000000000000..05ba0f8565b86708fd9448f2d93a3295d8c131a9 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/c3a55e0473720311e10e2d4c402da74c36c834e399f554389210047f51a67319.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5eb386732c11b68c0f0d5ab4084298059c554ea0fb02fb7b7241375a478a1160 +size 4773 diff --git a/data/2025/2504_15xxx/2504.15848/images/c68014cf5101a6d4a08998285ee4b085ef6bfd6d34aee5cfdc345bad0334cc9f.jpg b/data/2025/2504_15xxx/2504.15848/images/c68014cf5101a6d4a08998285ee4b085ef6bfd6d34aee5cfdc345bad0334cc9f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a3126a56c36cc10f283a74462b90c812641bd5e2 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/c68014cf5101a6d4a08998285ee4b085ef6bfd6d34aee5cfdc345bad0334cc9f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:082e2255fca4ff4977185927b2c60d7136c74f898f2bc395136b0367454fa7c2 +size 5731 diff --git a/data/2025/2504_15xxx/2504.15848/images/c7f16f703165deced537a0b922aa2f80c5c899a274c670eeeceb35f3ea956d98.jpg b/data/2025/2504_15xxx/2504.15848/images/c7f16f703165deced537a0b922aa2f80c5c899a274c670eeeceb35f3ea956d98.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5c50af5eb43254a78eedf39dcea5224317706771 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/c7f16f703165deced537a0b922aa2f80c5c899a274c670eeeceb35f3ea956d98.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:301445a87062de96d4a9d7af693e9a22879f46a6e91a59e484cb7f624f7f44b0 +size 229022 diff --git a/data/2025/2504_15xxx/2504.15848/images/cf88179055dd0c9392360e11901f16fd775e682b57b2e85efb21afdca61f9ffb.jpg 
b/data/2025/2504_15xxx/2504.15848/images/cf88179055dd0c9392360e11901f16fd775e682b57b2e85efb21afdca61f9ffb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..76eeffddb1b1b0700b586fe6b9fe964b9dc75637 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/cf88179055dd0c9392360e11901f16fd775e682b57b2e85efb21afdca61f9ffb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b04c00402b4881467f7d19e3e3ee41d58c68e47f56648679243a619d23263d6 +size 3625 diff --git a/data/2025/2504_15xxx/2504.15848/images/d0fccd6c9c3806a0763a2979525d5a81dbcd338f8180464516628387f00a7ac6.jpg b/data/2025/2504_15xxx/2504.15848/images/d0fccd6c9c3806a0763a2979525d5a81dbcd338f8180464516628387f00a7ac6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c3c6d96625707b6b83f7aa2e88c466110e8edf48 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/d0fccd6c9c3806a0763a2979525d5a81dbcd338f8180464516628387f00a7ac6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7d857c886f2a32ebcb04705932f96ea22a60cff7a472db9ab17aeb6163a431c +size 88273 diff --git a/data/2025/2504_15xxx/2504.15848/images/d87cc80f17376d53aa18a021c3dfd53bfc4ebdbcdf4505f4d2c7b8fd1d21d69f.jpg b/data/2025/2504_15xxx/2504.15848/images/d87cc80f17376d53aa18a021c3dfd53bfc4ebdbcdf4505f4d2c7b8fd1d21d69f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ee12bb89788258160210d596b080abf3e753a9fb --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/d87cc80f17376d53aa18a021c3dfd53bfc4ebdbcdf4505f4d2c7b8fd1d21d69f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b23625e45797353f82f0ecbe19d03af479d8ffe9806516c3e44dadf3d92de4a3 +size 4484 diff --git a/data/2025/2504_15xxx/2504.15848/images/d9c00fe46e11a776cce92beb84c5862921e6256a790e3398fc8e625562168895.jpg b/data/2025/2504_15xxx/2504.15848/images/d9c00fe46e11a776cce92beb84c5862921e6256a790e3398fc8e625562168895.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..fbde1b6bb1cae0076fba18379e857700ec77a5f8 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/d9c00fe46e11a776cce92beb84c5862921e6256a790e3398fc8e625562168895.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b92be89eb0e9e72878316070488818a6771d31616d73cc6361544a4397b48b35 +size 18788 diff --git a/data/2025/2504_15xxx/2504.15848/images/dcdaa3880792ddf7cb03bb81958c3d68e202fe3298dacecb086cce79a93b452b.jpg b/data/2025/2504_15xxx/2504.15848/images/dcdaa3880792ddf7cb03bb81958c3d68e202fe3298dacecb086cce79a93b452b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..69e98cc0fb75360016eca65abdd8158f0ece4d4a --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/dcdaa3880792ddf7cb03bb81958c3d68e202fe3298dacecb086cce79a93b452b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8805bebf1c58afe529a5b0b911f3645d1d03ffdb2529e33d3a4a0a97bb058c6 +size 4529 diff --git a/data/2025/2504_15xxx/2504.15848/images/e2763142f562a1ad3eee2ed812f79fe94383c37426703d5504bad7181405583a.jpg b/data/2025/2504_15xxx/2504.15848/images/e2763142f562a1ad3eee2ed812f79fe94383c37426703d5504bad7181405583a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6d1dca48301d1c29e86e44584e4be2e6ac4e6191 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/e2763142f562a1ad3eee2ed812f79fe94383c37426703d5504bad7181405583a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb2738b14c43e174ba70031a212ee1245a4ec9f0c87e4b446cdc32ff7fbd877a +size 6197 diff --git a/data/2025/2504_15xxx/2504.15848/images/ef430b2b0457b39e9f33224cf48720981aa5772b0bdcca9226f222196ae0fd43.jpg b/data/2025/2504_15xxx/2504.15848/images/ef430b2b0457b39e9f33224cf48720981aa5772b0bdcca9226f222196ae0fd43.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8399a898a0137a227fe46961125f5d458e9153ba --- /dev/null +++ 
b/data/2025/2504_15xxx/2504.15848/images/ef430b2b0457b39e9f33224cf48720981aa5772b0bdcca9226f222196ae0fd43.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbe46c65d7286cab020b7cf2d3424f89b28a3e4479c20ff0d6b3d520e213f121 +size 19220 diff --git a/data/2025/2504_15xxx/2504.15848/images/f035b5be188bc560f8e6ccdde0472213968c026b15af35b35081f552ab580ea2.jpg b/data/2025/2504_15xxx/2504.15848/images/f035b5be188bc560f8e6ccdde0472213968c026b15af35b35081f552ab580ea2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..453fcdf86f4959118d73a88aa26a8c34fd482866 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/f035b5be188bc560f8e6ccdde0472213968c026b15af35b35081f552ab580ea2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1cc4d608abe74a66b704019740b846cec430d99651f639ae1a0bc325beba9db +size 16500 diff --git a/data/2025/2504_15xxx/2504.15848/images/f4d1c1a9ed4288e54021cae5187de76218c0eecc813a772b6d3020f33e5570c6.jpg b/data/2025/2504_15xxx/2504.15848/images/f4d1c1a9ed4288e54021cae5187de76218c0eecc813a772b6d3020f33e5570c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e5fdcff01e1caf32a938eb5c651815e557a9215b --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/f4d1c1a9ed4288e54021cae5187de76218c0eecc813a772b6d3020f33e5570c6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74b4dabaa553d8575a32bc7ccec46bc8430cb7effedd96816734552735d4a6d1 +size 103463 diff --git a/data/2025/2504_15xxx/2504.15848/images/f4df37f89c13fdd7683da8ec9fb4436a07840bfc250550b2ef885ed85a4637ca.jpg b/data/2025/2504_15xxx/2504.15848/images/f4df37f89c13fdd7683da8ec9fb4436a07840bfc250550b2ef885ed85a4637ca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..99afe89bc45be4ee285cecf11f762de7d58d90cf --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/f4df37f89c13fdd7683da8ec9fb4436a07840bfc250550b2ef885ed85a4637ca.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:7245f6d447559a2f1964db06059e855028fba2ba50ee7143c5931579b870cc60 +size 9919 diff --git a/data/2025/2504_15xxx/2504.15848/images/f8457c3112501699ff84b3cfc411209dbad50c14d2e6528c484b1697dae5c507.jpg b/data/2025/2504_15xxx/2504.15848/images/f8457c3112501699ff84b3cfc411209dbad50c14d2e6528c484b1697dae5c507.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3474545a20105309b6d94c2ee5811f56e97e6e4f --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/images/f8457c3112501699ff84b3cfc411209dbad50c14d2e6528c484b1697dae5c507.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f976557d484ee65e75fba6b25c233bb3d0fbbb9a77a02454cd4e3d3a16ae694 +size 26336 diff --git a/data/2025/2504_15xxx/2504.15848/layout.json b/data/2025/2504_15xxx/2504.15848/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..3b8cce01a967cfb634dcaa1cf69df157833ee17b --- /dev/null +++ b/data/2025/2504_15xxx/2504.15848/layout.json @@ -0,0 +1,16997 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 45, + 52, + 564, + 108 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 52, + 564, + 108 + ], + "spans": [ + { + "bbox": [ + 45, + 52, + 564, + 108 + ], + "type": "text", + "content": "Exploring Cognitive and Aesthetic Causality for Multimodal Aspect-Based Sentiment Analysis" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 119, + 119, + 490, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 119, + 490, + 148 + ], + "spans": [ + { + "bbox": [ + 119, + 119, + 490, + 148 + ], + "type": "text", + "content": "Luwei Xiao, Student Member, IEEE, Rui Mao*, Member, IEEE, Shuai Zhao, Qika Lin, Yanhao Jia, Liang He, and Erik Cambria, Fellow, IEEE" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 64, + 165, + 544, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 165, + 544, + 312 + ], + "spans": [ + { + 
"bbox": [ + 64, + 165, + 544, + 312 + ], + "type": "text", + "content": "Abstract—Multimodal aspect-based sentiment classification (MASC) is an emerging task due to an increase in user-generated multimodal content on social platforms, aimed at predicting sentiment polarity toward specific aspect targets (i.e., entities or attributes explicitly mentioned in text-image pairs). Despite extensive efforts and significant achievements in existing MASC, substantial gaps remain in understanding fine-grained visual content and the cognitive rationales derived from semantic content and impressions (cognitive interpretations of emotions evoked by image content). In this study, we present Chimera: a cognitive and aesthetic sentiment causality understanding framework to derive fine-grained holistic features of aspects and infer the fundamental drivers of sentiment expression from both semantic perspectives and affective-cognitive resonance (the synergistic effect between emotional responses and cognitive interpretations). Specifically, this framework first incorporates visual patch features for patch-word alignment. Meanwhile, it extracts coarse-grained visual features (e.g., overall image representation) and fine-grained visual regions (e.g., aspect-related regions) and translates them into corresponding textual descriptions (e.g., facial, aesthetic). Finally, we leverage the sentimental causes and impressions generated by a large language model (LLM) to enhance the model's awareness of sentimental cues evoked by semantic content and affective-cognitive resonance. Experimental results on standard MASC datasets demonstrate the effectiveness of the proposed model, which also exhibits greater flexibility to MASC compared to LLMs such as GPT-4o. 
We have publicly released the complete implementation and dataset at https://github.com/Xillv/Chimera" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 64, + 320, + 526, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 320, + 526, + 342 + ], + "spans": [ + { + "bbox": [ + 64, + 320, + 526, + 342 + ], + "type": "text", + "content": "Index Terms—Multimodal aspect-based sentiment classification, Sentiment causality, Large language models, Affective-cognitive resonance." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 366, + 141, + 379 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 366, + 141, + 379 + ], + "spans": [ + { + "bbox": [ + 45, + 366, + 141, + 379 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 388, + 301, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 388, + 301, + 632 + ], + "spans": [ + { + "bbox": [ + 44, + 388, + 301, + 632 + ], + "type": "text", + "content": "MULTIMODAL aspect-based sentiment classification (MASC) is a valuable task for analyzing user-generated multimodal content on social platforms, aiming to predict the sentiment polarity of a specific target/aspect term within a sentence, based on an image-text pair. In an era marked by growing global interconnectedness, social platforms have become essential channels for individuals to express opinions and share experiences [1]-[3]. These platforms support multimodal content, blending text and visual media, which better reflects how sentiment is conveyed [4]. 
Consequently, analyzing fine-grained sentiment expression in multimodal scenarios not only improves the depth of sentiment classification but also aligns with the natural manner in which users express opinions and emotions, ultimately supporting more accurate sentiment analysis for applications in finance [5], [6], social research [7], [8], and human-computer interaction [9], [10]. Current methodologies for MASC can be broadly divided into two principal categories: visual-text fusion-based approaches and translation-based approaches. Visual-text fusion-based methods address MASC by directly integrating visual content with" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 308, + 388, + 564, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 388, + 564, + 411 + ], + "spans": [ + { + "bbox": [ + 308, + 388, + 564, + 411 + ], + "type": "text", + "content": "textual features through various attention-based mechanisms [11]-[16]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 411, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 411, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 411, + 566, + 748 + ], + "type": "text", + "content": "Yu et al. [11] were the first to propose the utilization of ResNet for image feature extraction in conjunction with BERT for language sequence modeling, subsequently feeding these components into a BERT encoder to facilitate the interactive modeling of cross-modal representations. Ling et al. [14] introduced a vision-language pre-training framework that leverages Faster R-CNN for extracting object-level visual features and BART for generating textual features, with the model pre-trained using three task-specific strategies targeting the language, vision, respectively. Yu et al. 
[13] presented a novel multi-task learning framework Image-Target Matching Network (ITM), which concurrently performs coarse-to-fine-grained visual-textual relevance detection and visual object-target alignment through cross-modal Transformers. Translation-based approaches focus on mapping visual content into the language space as auxiliary textual representations, leveraging this supplementary information, or integrating it with visual features to enhance MASC [17]-[22]. Khan et al. [17] translated the image into a corresponding caption, which is then jointly input with the sentence into BERT to predict the sentiment polarity associated with specific targets. Yang et al. [23] exploit a face-sensitive, translation-based approach that translates facial expressions in images into textual sentiment cues, which are then selectively aligned and fused with the targets for enhanced sentiment analysis. Xiao et al. [19] proposed the CoolNet framework, which generates visual captions for images and extracts syntactic and semantic features from the textual" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 317, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 317, + 34 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 317, + 34 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. 
XX, XXXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 44, + 650, + 301, + 677 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 650, + 301, + 677 + ], + "spans": [ + { + "bbox": [ + 44, + 650, + 301, + 677 + ], + "type": "text", + "content": "Luwei Xiao, and Liang He are with the School of Computer Science and Technology, East China Normal University, Shanghai 200062, China. E-mail: louisshaw@stu.ecnu.edu.cn, lhe@cs.ecnu.edu.cn" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 44, + 677, + 301, + 714 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 677, + 301, + 714 + ], + "spans": [ + { + "bbox": [ + 44, + 677, + 301, + 714 + ], + "type": "text", + "content": "- Rui Mao, Shuai Zhao, Yanhao Jia and Erik Cambria are with the College of Computing and Data Science, Nanyang Technological University, Singapore 639798. E-mail:{rui.mao, shuai.zhao, cambria}@ntu.edu.sg, yanhao002@e.ntu.edu.sg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 714, + 301, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 714, + 301, + 733 + ], + "spans": [ + { + "bbox": [ + 45, + 714, + 301, + 733 + ], + "type": "text", + "content": "Qika Lin is with the Saw Swee Hock School of Public Health, National University of Singapore 119077. 
E-mail: linqika@nus.edu.sg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 735, + 159, + 746 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 735, + 159, + 746 + ], + "spans": [ + { + "bbox": [ + 46, + 735, + 159, + 746 + ], + "type": "text", + "content": "* Corresponding author: Rui Mao" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 14, + 209, + 37, + 561 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 209, + 37, + 561 + ], + "spans": [ + { + "bbox": [ + 14, + 209, + 37, + 561 + ], + "type": "text", + "content": "arXiv:2504.15848v1 [cs.CL] 22 Apr 2025" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 42, + 301, + 65 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 42, + 301, + 65 + ], + "spans": [ + { + "bbox": [ + 45, + 42, + 301, + 65 + ], + "type": "text", + "content": "modality, subsequently fusing these with visual features through a cross-modal Transformer." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 77, + 301, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 77, + 301, + 307 + ], + "spans": [ + { + "bbox": [ + 44, + 77, + 301, + 307 + ], + "type": "text", + "content": "Despite substantial efforts and promising advancements, current solutions continue to encounter the following challenges. First, excessive duplicative visual patches can overshadow critical visual clues relevant to the specific target, leading to considerable misalignment during patch-token interactions. These small visual patches often lack semantic coherence compared to complete visual regions, particularly when aligning targets with their corresponding objects in an image, potentially leading to ambiguous semantic representations. 
Second, limited studies have focused on the underlying rationale behind sentiment cues, particularly from the perspectives of semantic content and affective-cognitive resonance. Owing to the multimodal nature of Twitter content, which spans diverse facets of daily life, inferring the sentiment associated with specific targets necessitates not only an understanding of the surface-level information in text and images (e.g., facial expressions) but also an in-depth comprehension of the contextual background of particular events and the impressions evoked by the image's content and aesthetic attributes." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 308, + 301, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 308, + 301, + 595 + ], + "spans": [ + { + "bbox": [ + 44, + 308, + 301, + 595 + ], + "type": "text", + "content": "To address the aforementioned challenges, this paper proposes Chimera: a cognitive and aesthetic sentiment causality understanding framework. This framework aims to incorporate and align fine-grained features of specific targets and reasons about semantic and impression rationales. However, two critical issues must be resolved to achieve these objectives: 1) How can specific targets in a sentence be aligned with their corresponding object-level fine-grained features in an image? 2) How can the model be enabled to reason about the emotional causal reasons within the semantic content of image-text pairs and the affective resonance evoked by image aesthetic attributes? For the first question, we propose to make the cross-modal alignment of the target via the visual patch-level by linguistic-aware patch-token alignment and object-level by accurately translating the object feature into language space. 
Regarding the second issue, while a recent study [24] developed a reasoning dataset for MASC, this dataset primarily explains the emotional causes within textual content and lacks reasoning capabilities for visual content and the affective resonance evoked by images, limiting its suitability for the multimodal nature of this task. Consequently, we employ a large language model (LLM), GPT-4o, to generate the semantic rationale and impression rationale to understand the causal foundations of emotions." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 596, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 596, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 596, + 301, + 746 + ], + "type": "text", + "content": "Specifically, our proposed framework first extracts visual patch-level and textual features, feeding them into a tailored linguistic-aware patch-token alignment (LPA) module to achieve patch-token alignment. Concurrently, a translation module (TM) translates the holistic image or object-level content into aesthetic captions or facial descriptions, leveraging multimodal named entity annotations from the work of Wang et al. [25]. The TM-generated text, along with the sentence and aspect, is then input into a generative module for multi-task learning to produce sentiment polarity, semantic rationale (SR), and impression rationale (IR). By bootstrapping the model's perception of underlying rationale through an in-depth understanding of textual and" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 308, + 42, + 564, + 76 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 564, + 76 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 564, + 76 + ], + "type": "text", + "content": "visual content as well as the affective resonance evoked by images, it enhances the performance of sentiment classification." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 323, + 76, + 557, + 88 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 76, + 557, + 88 + ], + "spans": [ + { + "bbox": [ + 323, + 76, + 557, + 88 + ], + "type": "text", + "content": "In a nutshell, the primary contributions are as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 323, + 93, + 564, + 324 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 323, + 93, + 564, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 93, + 564, + 163 + ], + "spans": [ + { + "bbox": [ + 323, + 93, + 564, + 163 + ], + "type": "text", + "content": "- We propose a novel framework for MASC that aligns specific targets with their corresponding visual objects at the patch-token and object levels while equipping the model with causal rationale reasoning ability for semantic rationale (SR), and impression rationale (IR)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 323, + 163, + 564, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 163, + 564, + 266 + ], + "spans": [ + { + "bbox": [ + 323, + 163, + 564, + 266 + ], + "type": "text", + "content": "- We approach this task by enabling the model to grasp the semantic content of image-text pairs and the affective resonance evoked by images. To our knowledge, we are the first to collect semantic and impression rationale data for the MASC task, based on existing MASC datasets, extending its content to incorporate semantic and impression rationale, offering a valuable resource for advancing MASC research." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 323, + 266, + 564, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 266, + 564, + 324 + ], + "spans": [ + { + "bbox": [ + 323, + 266, + 564, + 324 + ], + "type": "text", + "content": "- Experiments on three widely-used Twitter benchmarks demonstrate that our proposed method outperforms previous approaches, achieving state-of-the-art performance. Further evaluations validate the effectiveness of our approach for MASC tasks." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 308, + 329, + 566, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 329, + 566, + 445 + ], + "spans": [ + { + "bbox": [ + 308, + 329, + 566, + 445 + ], + "type": "text", + "content": "The remainder of this paper is organized as follows: Section 2 provides an overview of related research on multimodal aspect-based sentiment classification, image aesthetic assessment, and multimodal learning. Section 3 details the proposed framework, including linguistics-aware patchtoken alignment, the translation-based module, causal rationale dataset construction, and LLM-based annotation generation. Main experimental results are presented in Section 4, and the in-depth analysis is shown in 5, followed by conclusions in Section 6." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 309, + 460, + 411, + 471 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 460, + 411, + 471 + ], + "spans": [ + { + "bbox": [ + 309, + 460, + 411, + 471 + ], + "type": "text", + "content": "2 RELATED WORK" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 475, + 564, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 475, + 564, + 522 + ], + "spans": [ + { + "bbox": [ + 308, + 475, + 564, + 522 + ], + "type": "text", + "content": "This section reviews key methods in multimodal aspect-based sentiment analysis and image aesthetic assessment. Additionally, as our novel rationale dataset is constructed using an LLM, we introduce LLMs for data annotation." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 535, + 541, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 535, + 541, + 548 + ], + "spans": [ + { + "bbox": [ + 308, + 535, + 541, + 548 + ], + "type": "text", + "content": "2.1 Multimodal Aspect-based Sentiment Analysis" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 550, + 565, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 550, + 565, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 550, + 565, + 748 + ], + "type": "text", + "content": "Sentiment analysis is a well-established research area focused on understanding and identifying human emotions and opinions across various contexts [26]–[31]. With the exponential growth of user-generated multimodal content (e.g., image-text pairs, video clips) on social platforms [32]–[35] has drawn substantial attention to Multimodal Aspect-based Sentiment Analysis (MABSA) [36]–[40]. The MABSA task consists of two sub-tasks: Multimodal Aspect Term Extraction (MATE) and our focused MASC task. 
MATE [41] is essentially a named entity recognition task aimed at identifying all relevant specific targets within the textual content of an image-text pair. MASC [42], [43] is a text classification task in which specific targets are provided, requiring the identification of their sentiment polarity (positive, neutral, or negative) based on the given image-text pair. A series of recent studies have successfully unified these two subtasks into a single framework, effectively" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 317, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 317, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 317, + 35 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 44, + 558, + 315 + ], + "blocks": [ + { + "bbox": [ + 53, + 44, + 558, + 315 + ], + "lines": [ + { + "bbox": [ + 53, + 44, + 558, + 315 + ], + "spans": [ + { + "bbox": [ + 53, + 44, + 558, + 315 + ], + "type": "image", + "image_path": "c7f16f703165deced537a0b922aa2f80c5c899a274c670eeeceb35f3ea956d98.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 44, + 328, + 565, + 349 + ], + "lines": [ + { + "bbox": [ + 44, + 328, + 565, + 349 + ], + "spans": [ + { + "bbox": [ + 44, + 328, + 565, + 349 + ], + "type": "text", + "content": "Fig. 1. The overall framework of the proposed Chimera. 
Chimera consists of four parts: Translation Module, Rationale Dataset Construction, Linguistic-aware Semantic Alignment, and Rationale-Aware Learning." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 367, + 301, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 367, + 301, + 587 + ], + "spans": [ + { + "bbox": [ + 44, + 367, + 301, + 587 + ], + "type": "text", + "content": "streamlining the MABSA process [14], [15], [22], [44]–[47]. Among these studies, Yu et al. [12] proposed the Entity-Sensitive Attention and Fusion Network (ESAFN), which employs entity-oriented attention combined with a visual gate mechanism to model entity-sensitive inter-dynamics for MASC. Ju et al. [44] were the first to integrate MATE and MASC into a end-to-end task, developing a joint learning framework with cross-modal relation detection. Kruk et al. [35] proposed a multimodal framework for Instagram intent detection, integrating three taxonomies and the MDID dataset. It demonstrates that text-image fusion enhances accuracy by " + }, + { + "bbox": [ + 44, + 367, + 301, + 587 + ], + "type": "inline_equation", + "content": "9.6\\%" + }, + { + "bbox": [ + 44, + 367, + 301, + 587 + ], + "type": "text", + "content": " under semiotic divergence, emphasizing the necessity of multimodal models for capturing the non-intersective \"meaning multiplication\" inherent in social media. Yang et al. [15] improved cross-modal alignment modeling through a Transformer-based multi-task learning framework, incorporating text-guided cross-modal interactions and using adjective-noun pairs as supervision for a visual auxiliary task." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 596, + 301, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 596, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 596, + 301, + 746 + ], + "type": "text", + "content": "Zhou et al. 
[16] developed an aspect-oriented multimodal fusion approach that constructs an informative dependency graph to minimize additional visual and textual noise in cross-modal interactions by selectively processing aspect-relevant textual and image features. Huang et al. [20] put forward to mapping images into scene graphs, using triplet semantic relationships among entities along with image captions to construct a relatedness matrix for achieving cross-modal alignment in MASC. More recently, Xiao et al. [22] introduced the Atlantis, a trident-shaped architecture that incorporates aesthetic attributes to enhance the emotional resonance of visual content. Fan et al. [24] devised a Flant5-based multi-task learning architecture to enhance the" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 308, + 367, + 564, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 367, + 564, + 402 + ], + "spans": [ + { + "bbox": [ + 308, + 367, + 564, + 402 + ], + "type": "text", + "content": "model's reasoning capabilities for inferring underlying and direct causes of sentiment expressions. Additionally, they constructed a practical causal dataset for MASC." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 308, + 403, + 564, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 403, + 564, + 449 + ], + "spans": [ + { + "bbox": [ + 308, + 403, + 564, + 449 + ], + "type": "text", + "content": "Our proposed method aims to achieve cross-modal alignment at the patch and object levels while equipping the model with reasoning capabilities to discern the semantic and impression rationale underlying sentiment expressions." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 309, + 465, + 468, + 477 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 465, + 468, + 477 + ], + "spans": [ + { + "bbox": [ + 309, + 465, + 468, + 477 + ], + "type": "text", + "content": "2.2 Image Aesthetic Assessment" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 307, + 480, + 565, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 480, + 565, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 480, + 565, + 748 + ], + "type": "text", + "content": "Image aesthetics play a fundamental role in shaping viewers' emotional responses and overall aesthetic experience through complex psychological and cognitive processes [48]. Image aesthetics pertain to the subjective evaluation and appreciation of its beauty [49]. Image Aesthetic Assessment seeks to systematically appraise this aesthetic quality by analyzing the visual appeal of images [50]. Empirical psychological research corroborates that images can trigger a wide range of emotions, which are influenced by their aesthetic attributes and semantic content [51]. Previous research concentrated on aesthetic image captioning and analysis through the aggregation of commentary on aesthetic attributes [52]. These studies address the concepts of style, layout, and aesthetics from the viewpoints of beauty and visual attractiveness. Recent scholarly efforts have focused on encouraging vision-language models to generate visual connotations and captions related to various aesthetic attributes (e.g., color, harmony, lighting, composition) [53]. More recently, Kruk et al. [54] introduced a connotation-rich dataset, Impressions, designed to explore the emotions, thoughts, and beliefs that images evoke, along with the aesthetic elements that elicit these responses. 
The introduction of this dataset marks a significant advance in the study of" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 64 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 64 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 64 + ], + "type": "text", + "content": "how visual stimuli can influence complex perceptual and emotional outcomes." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 65, + 301, + 136 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 65, + 301, + 136 + ], + "spans": [ + { + "bbox": [ + 44, + 65, + 301, + 136 + ], + "type": "text", + "content": "In this study, we utilize aesthetic attributes to capture sentiment cues within visual content at both object and holistic levels. Inspired by Impressions [54], we further prompt the LLM to generate impression rationales for MASC, enabling analysis of the underlying affective resonance evoked by images." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 45, + 149, + 226, + 161 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 149, + 226, + 161 + ], + "spans": [ + { + "bbox": [ + 45, + 149, + 226, + 161 + ], + "type": "text", + "content": "2.3 LLMs-Based Rationale Generation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 164, + 301, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 164, + 301, + 487 + ], + "spans": [ + { + "bbox": [ + 44, + 164, + 301, + 487 + ], + "type": "text", + "content": "Recently, LLMs have achieved significant success across various downstream tasks [55]–[58]. LLMs such as GPT-40 [59], Gemini [60], and LLaMA-2 [61] hold significant potential to usher data annotation into a new era, functioning not merely as auxiliary tools but as vital enhancers of its effectiveness and quality [62], [63]. LLMs can automatically annotate samples, ensure consistency across large data volumes, and adapt to specific domains via fine-tuning, thereby establishing a new standard in deep learning [64]–[66]. The rationale represents the detailed cognitive process an individual typically follows when solving a problem, providing useful supplementary information for the final answer [67]. Early studies [68] typically relied on human experts to annotate rationale in datasets, significantly limiting availability and scalability. A bunch of diverse methodologies have been developed to produce high-quality and fine-grained rationale. Wang et al. [69] proposed to elucidate each choice in a sample by generating choice-specific rationales via LLMs. Wang et al. [70] enhanced the credibility of generated rationales by incorporating gold-standard answers and using contrastive decoding algorithms. Liu et al. [71] laid much emphasis on curating high-quality prompts to obtain fine-grained rationales from GPT-4o and build a logical chain-of-thought instruction-tuning dataset. More recently, Kang et al. 
[72] developed a sophisticated neural reranking mechanism to dynamically retrieve highly relevant supplementary documents for generating high-quality rationales in knowledge-intensive reasoning tasks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 487, + 301, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 487, + 301, + 559 + ], + "spans": [ + { + "bbox": [ + 44, + 487, + 301, + 559 + ], + "type": "text", + "content": "In this paper, we build upon the work of Wang et al. [70] by fully utilizing the dataset's gold-standard annotations to generate semantic and impression rationales through meticulously designed prompts. This approach ensures high-quality rationale generation while avoiding additional costs from trial-and-error OpenAI API usage fees." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 574, + 144, + 585 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 574, + 144, + 585 + ], + "spans": [ + { + "bbox": [ + 45, + 574, + 144, + 585 + ], + "type": "text", + "content": "3 METHODOLOGY" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 589, + 301, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 589, + 301, + 659 + ], + "spans": [ + { + "bbox": [ + 44, + 589, + 301, + 659 + ], + "type": "text", + "content": "This section presents our proposed framework for MASC, beginning with the task formalization, followed by the rationale dataset construction process, and concluding with the proposed method, comprising linguistic-aware semantic alignment, a translation module, rationale dataset construction and a rationale-aware learning framework." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 673, + 138, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 673, + 138, + 685 + ], + "spans": [ + { + "bbox": [ + 45, + 673, + 138, + 685 + ], + "type": "text", + "content": "3.1 Task Definition" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 44, + 688, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 688, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 688, + 301, + 748 + ], + "type": "text", + "content": "Given a multimodal dataset " + }, + { + "bbox": [ + 44, + 688, + 301, + 748 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 44, + 688, + 301, + 748 + ], + "type": "text", + "content": ", each sample " + }, + { + "bbox": [ + 44, + 688, + 301, + 748 + ], + "type": "inline_equation", + "content": "X_{i}" + }, + { + "bbox": [ + 44, + 688, + 301, + 748 + ], + "type": "text", + "content": " consists of an image " + }, + { + "bbox": [ + 44, + 688, + 301, + 748 + ], + "type": "inline_equation", + "content": "V_{i}" + }, + { + "bbox": [ + 44, + 688, + 301, + 748 + ], + "type": "text", + "content": " paired with a sentence " + }, + { + "bbox": [ + 44, + 688, + 301, + 748 + ], + "type": "inline_equation", + "content": "S_{i}" + }, + { + "bbox": [ + 44, + 688, + 301, + 748 + ], + "type": "text", + "content": " containing one or more specific targets " + }, + { + "bbox": [ + 44, + 688, + 301, + 748 + ], + "type": "inline_equation", + "content": "T_{i}" + }, + { + "bbox": [ + 44, + 688, + 301, + 748 + ], + "type": "text", + "content": ". 
The goal of MASC is to predict the sentiment polarity " + }, + { + "bbox": [ + 44, + 688, + 301, + 748 + ], + "type": "inline_equation", + "content": "Y_{i} \\in \\{\\text{Positive}, \\text{Negative}, \\text{Neutral}\\}" + }, + { + "bbox": [ + 44, + 688, + 301, + 748 + ], + "type": "text", + "content": " for a specific target " + }, + { + "bbox": [ + 44, + 688, + 301, + 748 + ], + "type": "inline_equation", + "content": "T_{i}" + }, + { + "bbox": [ + 44, + 688, + 301, + 748 + ], + "type": "text", + "content": ". Moreover, our framework infers" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "spans": [ + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "text", + "content": "both semantic rationale " + }, + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "inline_equation", + "content": "SR_{i}" + }, + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "text", + "content": " and impression rationale " + }, + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "inline_equation", + "content": "IR_{i}" + }, + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "text", + "content": ", explaining the sentiment prediction " + }, + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "inline_equation", + "content": "Y_{i}" + }, + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "text", + "content": " for a specific target " + }, + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "inline_equation", + "content": "T_{i}" + }, + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "text", + "content": ", based on multimodal semantic meaning and the affective resonance evoked by the image. 
In this study, the model outputs " + }, + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "inline_equation", + "content": "SR_{i}, IR_{i}, Y_{i}" + }, + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "text", + "content": " for an input sample " + }, + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "inline_equation", + "content": "X_{i} = (S_{i}, V_{i}, T_{i})" + }, + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "inline_equation", + "content": "SR_{i}" + }, + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "inline_equation", + "content": "IR_{i}" + }, + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "text", + "content": " offer supplementary sentimental cues for sentiment prediction " + }, + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "inline_equation", + "content": "T_{i}" + }, + { + "bbox": [ + 307, + 42, + 566, + 125 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 309, + 137, + 414, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 137, + 414, + 148 + ], + "spans": [ + { + "bbox": [ + 309, + 137, + 414, + 148 + ], + "type": "text", + "content": "3.2 Method Overview" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 152, + 566, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 152, + 566, + 419 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 566, + 419 + ], + "type": "text", + "content": "As shown in Figure 1, our proposed framework comprises four technical components, namely a Translation Module, Rationale Dataset Construction, Linguistic-aware Semantic Alignment, and Rationale-Aware Learning. The Translation Module converts visual content, both holistic and object-level, into language captions. 
For entire images, it generates emotion-laden aesthetic captions using our fine-tuned BLIP. For object-level content, it maps visuals to facial descriptions or aesthetic captions with rich emotional cues via EmoLA or our fine-tuned BLIP. The construction of the rationale dataset involves generating semantic and impression rationales. We curate prompts tailored to each rationale category and input them, along with the samples, into GPT-4o to collect the desired rationales. The Linguistic-aware Semantic Alignment module segments the input image into patches, dynamically selects and refines relevant visual patches, and achieves patch-token alignment guided by linguistic features from the input sentence. Lastly, we propose a Rationale-Aware Learning framework built up on a generative model that simultaneously learns sentiment classification, semantic rationale generation, and impression rationale generation from diverse textual inputs, such as sentences, aesthetic captions, and facial descriptions." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 309, + 431, + 422, + 442 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 431, + 422, + 442 + ], + "spans": [ + { + "bbox": [ + 309, + 431, + 422, + 442 + ], + "type": "text", + "content": "3.3 Translation Module" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 446, + 566, + 643 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 446, + 566, + 643 + ], + "spans": [ + { + "bbox": [ + 307, + 446, + 566, + 643 + ], + "type": "text", + "content": "This module translates visual content into overall aesthetic captions, object-level facial descriptions, or object-level aesthetic captions in textual form, embedding rich sentimental cues to facilitate object-level sentiment alignment. 
Specifically, we leverage object annotations from the Fine-Grained Multimodal Named Entity Recognition (MNER) task [25], which annotates specific targets in the sentence and their corresponding objects in the image. The MNER dataset is derived from the same Twitter dataset as the MASC datasets, incorporating the original image-text pairs from MASC. We further pre-process the MNER dataset and transfer its object annotations to the MASC dataset. To generate aesthetic captions rich in sentimental cues, we fine-tune a BLIP model using the recent aesthetic-specific dataset, Impression [54]. For facial description, we deploy the LLM-based EmoLA [73] to interpret fine-grained human mental states from images." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 307, + 643, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 643, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 643, + 566, + 748 + ], + "type": "text", + "content": "To tackle the challenge of potential one-to-many annotation scenarios, wherein multiple visual objects correspond to a specific target in the sentence, we calculate the similarity between the entire image and all object annotations, retaining only the object with the highest similarity score for each specific target. Subsequently, we generate various textual auxiliary sentences, based on object annotations. Firstly, in cases where the object corresponding to a specific target is absent from the image, a fine-tuned BLIP model is applied to" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 317, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 317, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 317, + 35 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. 
XX, XXXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 41, + 299, + 66 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 41, + 299, + 66 + ], + "spans": [ + { + "bbox": [ + 44, + 41, + 299, + 66 + ], + "type": "text", + "content": "generate an overall aesthetic caption " + }, + { + "bbox": [ + 44, + 41, + 299, + 66 + ], + "type": "inline_equation", + "content": "A^{c} = \\left(a_{1}^{c}, a_{2}^{c}, \\ldots, a_{N_{c}}^{c}\\right)" + }, + { + "bbox": [ + 44, + 41, + 299, + 66 + ], + "type": "text", + "content": " for the entire image:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 131, + 71, + 299, + 85 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 131, + 71, + 299, + 85 + ], + "spans": [ + { + "bbox": [ + 131, + 71, + 299, + 85 + ], + "type": "interline_equation", + "content": "A ^ {c} = B L I P _ {\\text {f i n e}} (V), \\tag {1}", + "image_path": "34127dfea685adba1f893830d4ba483e2b00a788a55d69e6eaf849f2801ac0fa.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 89, + 301, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 89, + 301, + 183 + ], + "spans": [ + { + "bbox": [ + 44, + 89, + 301, + 183 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 44, + 89, + 301, + 183 + ], + "type": "inline_equation", + "content": "BLIP_{fine}(\\cdot)" + }, + { + "bbox": [ + 44, + 89, + 301, + 183 + ], + "type": "text", + "content": " is the fine-tuned BLIP over Impression dataset. 
If the object corresponding to a specific target is present in the image, we develop a Human-Object Differentiation (HOD) module based on the Sample and Computation Redistribution for Efficient Face Detection (SCRFD) [74] framework. This module determines the presence of a face within the annotated object-level visual content and assigns a facial binary label:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 129, + 187, + 299, + 202 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 187, + 299, + 202 + ], + "spans": [ + { + "bbox": [ + 129, + 187, + 299, + 202 + ], + "type": "interline_equation", + "content": "Y _ {i} ^ {o _ {j}} = H O D \\left(V _ {i} ^ {o _ {j}}\\right), \\tag {2}", + "image_path": "cf88179055dd0c9392360e11901f16fd775e682b57b2e85efb21afdca61f9ffb.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 206, + 301, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 206, + 301, + 277 + ], + "spans": [ + { + "bbox": [ + 44, + 206, + 301, + 277 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 44, + 206, + 301, + 277 + ], + "type": "inline_equation", + "content": "Y_{i}^{o_{j}} \\in [1,0]" + }, + { + "bbox": [ + 44, + 206, + 301, + 277 + ], + "type": "text", + "content": " indicates whether the object-level visual content contains a face (0 for no face, 1 for face detected), and " + }, + { + "bbox": [ + 44, + 206, + 301, + 277 + ], + "type": "inline_equation", + "content": "V_{i}^{o_{j}}" + }, + { + "bbox": [ + 44, + 206, + 301, + 277 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 44, + 206, + 301, + 277 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 44, + 206, + 301, + 277 + ], + "type": "text", + "content": "-th object-level visual content in the " + }, + { + "bbox": [ + 44, + 206, + 301, + 277 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 44, + 206, + 301, + 277 + ], + 
"type": "text", + "content": "-th image. Subsequently, we generate facial descriptions or aesthetic captions for object-level visual content based on the facial binary label:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 93, + 281, + 299, + 312 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 281, + 299, + 312 + ], + "spans": [ + { + "bbox": [ + 93, + 281, + 299, + 312 + ], + "type": "interline_equation", + "content": "A ^ {o} = \\left\\{ \\begin{array}{l l} E m o L A \\left(V _ {i} ^ {o _ {j}}\\right), & \\text {i f} Y _ {i} ^ {o _ {j}} = 1, \\\\ B L I P _ {\\text {f i n e}} \\left(V _ {i} ^ {o _ {j}}\\right), & \\text {o t h e r w i s e}, \\end{array} \\right. \\tag {3}", + "image_path": "a549ce47bc2931027c87290c6e6c5b42f07ed49211e30ffcf511804b3afa6c17.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 317, + 301, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 317, + 301, + 353 + ], + "spans": [ + { + "bbox": [ + 44, + 317, + 301, + 353 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 44, + 317, + 301, + 353 + ], + "type": "inline_equation", + "content": "A^o = (a_1^o, a_2^o, \\ldots, a_{N_o}^o)" + }, + { + "bbox": [ + 44, + 317, + 301, + 353 + ], + "type": "text", + "content": " is the generated auxiliary sentence (facial description or aesthetic caption) for the object-level visual content." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 374, + 212, + 385 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 374, + 212, + 385 + ], + "spans": [ + { + "bbox": [ + 45, + 374, + 212, + 385 + ], + "type": "text", + "content": "3.4 Rationale Dataset Construction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 44, + 392, + 301, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 392, + 301, + 555 + ], + "spans": [ + { + "bbox": [ + 44, + 392, + 301, + 555 + ], + "type": "text", + "content": "The current MASC benchmark includes only specific target (aspect) labels within the image-text pair sentences and their corresponding sentiment polarities. Recently, Fan et al. [24] introduced a dataset for MASC with cause analysis, focusing exclusively on textual semantics rather than integrating both visual and textual cues. Moreover, they overlook the affective resonance evoked by image aesthetic attributes, eliminating a crucial layer of emotional cues and resulting in an incomplete sentiment representation. This omission hinders the holistic integration of textual and visual modalities, leading to suboptimal sentiment modeling. Therefore, we employ GPT-4o to generate semantic and impression rationales, with the detailed generation process outlined in Algorithm 1." 
+ } + ] + } + ], + "index": 10 + }, + { + "type": "code", + "bbox": [ + 45, + 580, + 301, + 744 + ], + "blocks": [ + { + "bbox": [ + 45, + 566, + 236, + 578 + ], + "lines": [ + { + "bbox": [ + 45, + 566, + 236, + 578 + ], + "spans": [ + { + "bbox": [ + 45, + 566, + 236, + 578 + ], + "type": "text", + "content": "Algorithm 1 Rationale Dataset Construction" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "lines": [ + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "spans": [ + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "text", + "content": "Input: All samples " + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "inline_equation", + "content": "(V, S, T, Y)" + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "text", + "content": " in MASC dataset " + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "text", + "content": " \nOutput: Rationale dataset " + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "text", + "content": " which contains Semantic Rationale (SR) and Impression Rationale (IR) \n1: Design & refine prompt pool for SR (SRP) and IR (IRP) \n2: for each sample " + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "inline_equation", + "content": "(V_i, S_i, T_i, Y_i)" + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "text", + "content": " do \n3: //Randomly select a prompt from SRP for SR \n4: " + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "inline_equation", + "content": "SR_{prompt} \\gets PromptPoolforSR(V_i, 
S_i, T_i, Y_i)" + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "text", + "content": " \n5: //Randomly select a prompt from IRP for IR \n6: " + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "inline_equation", + "content": "IR_{prompt} \\gets PromptPoolforIR(V_i, S_i, T_i, Y_i)" + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "text", + "content": " \n7: Produce SR and IR via GPT-4o \n8: " + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "inline_equation", + "content": "SR_i \\gets GPT-4o(V_i, S_i, T_i, Y_i, SR_{prompt})" + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "text", + "content": " \n9: " + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "inline_equation", + "content": "IR_i \\gets GPT-4o(V_i, S_i, T_i, Y_i, IR_{prompt})" + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "text", + "content": " \n10: Add " + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "inline_equation", + "content": "(V_i, S_i, T_i, Y_i, SR_i, IR_i)" + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 45, + 580, + 301, + 744 + ], + "type": "text", + "content": " \n11: end for" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "code_body" + } + ], + "index": 12, + "sub_type": "algorithm" + }, + { + "type": "table", + "bbox": [ + 317, + 72, + 558, + 313 + ], + "blocks": [ + { + "bbox": [ + 344, + 43, + 529, + 63 + ], + "lines": [ + { + "bbox": [ + 344, + 43, + 529, + 63 + ], + "spans": [ + { + "bbox": [ + 344, + 43, + 529, + 63 + ], + "type": "text", + "content": "TABLE1 Example prompts for semantic rationale generation." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 72, + 558, + 313 + ], + "lines": [ + { + "bbox": [ + 317, + 72, + 558, + 313 + ], + "spans": [ + { + "bbox": [ + 317, + 72, + 558, + 313 + ], + "type": "table", + "html": "
TypePrompts
System PromptYou are an AI assistant specializing in multimodal understanding and sentiment analysis, particularly in scenarios involving the integration of image and text modalities.
Semantic Rationale Generation PromptYou will be provided with an image-text pair. Your task is to analyze the sentiment towards the specified entity {aspect} and explain why the sentiment polarity {label} is appropriate.\nYour explanation should consider both the semantic meaning of the text and the visual representation of the image, focusing on explicit content and the emotional or contextual cues conveyed by their combination.\nStart your response with: "Based on the image-text pair, the sentiment towards {aspect} is {label} because...". Provide a concise, focused explanation highlighting the single most compelling reason for this sentiment classification.
", + "image_path": "f4d1c1a9ed4288e54021cae5187de76218c0eecc813a772b6d3020f33e5570c6.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 330, + 565, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 330, + 565, + 411 + ], + "spans": [ + { + "bbox": [ + 308, + 330, + 565, + 411 + ], + "type": "text", + "content": "To comprehensively capture the emotional rationale underlying the identified sentiment polarity from a semantic perspective of both image and text, we employ GPT-4o (gpt-4o-2024-05-13) via the OpenAI " + }, + { + "bbox": [ + 308, + 330, + 565, + 411 + ], + "type": "inline_equation", + "content": "\\mathrm{API}^1" + }, + { + "bbox": [ + 308, + 330, + 565, + 411 + ], + "type": "text", + "content": " to generate SR. Meanwhile, to enable the model to effectively capture implicit emotional cues arising from the affective resonance of aesthetic attributes, we employ GPT-4o to generate the IR." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 411, + 564, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 411, + 564, + 539 + ], + "spans": [ + { + "bbox": [ + 308, + 411, + 564, + 539 + ], + "type": "text", + "content": "To enhance the diversity of generated semantic and impression rationales (SR and IR), we designed and refined a series of templates to construct separate prompt pools for SR and IR, from which a prompt is randomly selected as instructions to guide GPT-4o in generating the corresponding rationale. In this study, we adopt the approach outlined by Sarah et al. [75] and Wang et al. [70], leveraging tailored prompts conditioned on the dataset's gold-standard annotations to generate SR and IR using GPT-4o. The example prompts for generating SR and IR are presented in Tables 1 and 2, respectively." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 308, + 551, + 529, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 551, + 529, + 563 + ], + "spans": [ + { + "bbox": [ + 308, + 551, + 529, + 563 + ], + "type": "text", + "content": "3.5 Linguistic-aware Semantic Alignment(LSA)" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 308, + 565, + 564, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 565, + 564, + 612 + ], + "spans": [ + { + "bbox": [ + 308, + 565, + 564, + 612 + ], + "type": "text", + "content": "We first introduce dynamic patch selection in Sec. 3.5.1. Then, we introduce the semantic patch calibration in Sec. 3.5.2. and patch-token alignment in Sec. 3.5.3. The overall process of LSA is shown in the persucode 2." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 309, + 620, + 474, + 633 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 620, + 474, + 633 + ], + "spans": [ + { + "bbox": [ + 309, + 620, + 474, + 633 + ], + "type": "text", + "content": "3.5.1 Dynamic Patch Selection(DPS)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 634, + 564, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 634, + 564, + 704 + ], + "spans": [ + { + "bbox": [ + 307, + 634, + 564, + 704 + ], + "type": "text", + "content": "Dynamic Patch Selection (DPS) is considered a discriminative task that assigns significance scores to visual patches and selects valuable patches based on high scores. For the image in an image-text pair, we opt for vision Transformers as the visual encoder. 
The image " + }, + { + "bbox": [ + 307, + 634, + 564, + 704 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 307, + 634, + 564, + 704 + ], + "type": "text", + "content": " is divided into " + }, + { + "bbox": [ + 307, + 634, + 564, + 704 + ], + "type": "inline_equation", + "content": "N_v" + }, + { + "bbox": [ + 307, + 634, + 564, + 704 + ], + "type": "text", + "content": " non-overlapping patches by spatial distribution." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 308, + 704, + 564, + 727 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 704, + 564, + 727 + ], + "spans": [ + { + "bbox": [ + 308, + 704, + 564, + 727 + ], + "type": "text", + "content": "These patches are then input as a visual token sequence into the vision Transformer to obtain a set of visual" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 317, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 317, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 317, + 35 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 317, + 735, + 432, + 746 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 735, + 432, + 746 + ], + "spans": [ + { + "bbox": [ + 317, + 735, + 432, + 746 + ], + "type": "text", + "content": "1. 
https://platform.openai.com" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 53, + 72, + 294, + 290 + ], + "blocks": [ + { + "bbox": [ + 77, + 43, + 268, + 63 + ], + "lines": [ + { + "bbox": [ + 77, + 43, + 268, + 63 + ], + "spans": [ + { + "bbox": [ + 77, + 43, + 268, + 63 + ], + "type": "text", + "content": "TABLE2 Example prompts for impression rationale generation." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 53, + 72, + 294, + 290 + ], + "lines": [ + { + "bbox": [ + 53, + 72, + 294, + 290 + ], + "spans": [ + { + "bbox": [ + 53, + 72, + 294, + 290 + ], + "type": "table", + "html": "
TypePrompts
System PromptYou are an AI assistant specializing in multimodal emotion and aesthetic understanding, especially in analyzing the emotional responses elicited by visual content.
Impression Rationale Generation PromptYou will be given an image-text pair. Your task is to analyze the specified entity {aspect} and its associated sentiment label {label} based entirely on the image's aesthetic attributes and the emotional resonance it conveys.Focus exclusively on the overall impression and visual connotations conveyed by the image, emphasizing why the assigned sentiment {label} aligns with the general mood or perception evoked by the entity. Avoid mentioning specific details; instead, high-light the prevailing emotional or aesthetic impression.
", + "image_path": "862dcad0b826d0cd5df4ca36a57269b05df71ee3565aaae7bcebc5ed440fcd2d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 323, + 300, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 323, + 300, + 418 + ], + "spans": [ + { + "bbox": [ + 44, + 323, + 300, + 418 + ], + "type": "text", + "content": "patch features " + }, + { + "bbox": [ + 44, + 323, + 300, + 418 + ], + "type": "inline_equation", + "content": "V = (v_{cls}, v_1, v_2, \\ldots, v_{N_v}) \\in \\mathbb{R}^{(N_v + 1) \\times d}" + }, + { + "bbox": [ + 44, + 323, + 300, + 418 + ], + "type": "text", + "content": ". For sentence " + }, + { + "bbox": [ + 44, + 323, + 300, + 418 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 44, + 323, + 300, + 418 + ], + "type": "text", + "content": ", a pre-trained Transformer serves as the textual encoder. The sentence is tokenized into " + }, + { + "bbox": [ + 44, + 323, + 300, + 418 + ], + "type": "inline_equation", + "content": "N_s" + }, + { + "bbox": [ + 44, + 323, + 300, + 418 + ], + "type": "text", + "content": " tokens and processed by the encoder to extract linguistic features " + }, + { + "bbox": [ + 44, + 323, + 300, + 418 + ], + "type": "inline_equation", + "content": "S = (s_1, s_2, \\ldots, s_{N_s}) \\in \\mathbb{R}^{N_s \\times d}" + }, + { + "bbox": [ + 44, + 323, + 300, + 418 + ], + "type": "text", + "content": ". 
Subsequently, we incorporate spatial information from images into visual patch features and use an MLP-based score-sensitive prediction mechanism to learn significant scores:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 74, + 423, + 299, + 437 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 423, + 299, + 437 + ], + "spans": [ + { + "bbox": [ + 74, + 423, + 299, + 437 + ], + "type": "interline_equation", + "content": "p _ {i} ^ {s} = \\operatorname {S i g m o i d} \\left(\\mathbf {M L P} \\left(\\boldsymbol {v} _ {i}\\right)\\right), i \\in \\{1, 2, \\dots , N _ {v} \\}, \\tag {4}", + "image_path": "e2763142f562a1ad3eee2ed812f79fe94383c37426703d5504bad7181405583a.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 441, + 299, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 441, + 299, + 570 + ], + "spans": [ + { + "bbox": [ + 44, + 441, + 299, + 570 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 44, + 441, + 299, + 570 + ], + "type": "inline_equation", + "content": "p_i^s \\in [0,1]" + }, + { + "bbox": [ + 44, + 441, + 299, + 570 + ], + "type": "text", + "content": " represents the importance score assigned to each visual patch. Moreover, achieving refined cross-modal alignment requires more than depending solely on a scoring mechanism to identify valuable visual patches without linguistic supervision [76], [77]. Consequently, we introduce linguistic context by calculating attentive scores between visual patches and the input sentence. First, we derive linguistic-aware scores " + }, + { + "bbox": [ + 44, + 441, + 299, + 570 + ], + "type": "inline_equation", + "content": "p_i^l" + }, + { + "bbox": [ + 44, + 441, + 299, + 570 + ], + "type": "text", + "content": " through cross-attention between visual patches and linguistic features. 
Then, we enhance key visual content by computing self-attention within patches, producing image-prominent scores " + }, + { + "bbox": [ + 44, + 441, + 299, + 570 + ], + "type": "inline_equation", + "content": "p_i^e" + }, + { + "bbox": [ + 44, + 441, + 299, + 570 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 73, + 574, + 299, + 590 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 574, + 299, + 590 + ], + "spans": [ + { + "bbox": [ + 73, + 574, + 299, + 590 + ], + "type": "interline_equation", + "content": "p _ {i} ^ {l} = \\operatorname {N o r m} \\left(\\boldsymbol {v} _ {i} \\cdot S / d\\right), p _ {i} ^ {e} = \\operatorname {N o r m} \\left(\\boldsymbol {v} _ {i} \\cdot V / d\\right), \\tag {5}", + "image_path": "5ec5f7b35d7fcc566dac4feb24d838fc7b43767d47b5c05ad3840c83e4fb7886.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 594, + 300, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 594, + 300, + 640 + ], + "spans": [ + { + "bbox": [ + 44, + 594, + 300, + 640 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 44, + 594, + 300, + 640 + ], + "type": "inline_equation", + "content": "\\text{Norm}(\\cdot)" + }, + { + "bbox": [ + 44, + 594, + 300, + 640 + ], + "type": "text", + "content": " denotes the normalization of scores to a range from 0 to 1. " + }, + { + "bbox": [ + 44, + 594, + 300, + 640 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 44, + 594, + 300, + 640 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 44, + 594, + 300, + 640 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 44, + 594, + 300, + 640 + ], + "type": "text", + "content": " represent the global embeddings for linguistic features and visual patches, respectively. 
These scores are integrated to derive the final value score:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 645, + 299, + 668 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 299, + 668 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 299, + 668 + ], + "type": "interline_equation", + "content": "p _ {i} ^ {f} = (1 - \\beta) p _ {i} ^ {s} + \\frac {\\beta}{2} \\left(p _ {i} ^ {l} + p _ {i} ^ {e}\\right), \\tag {6}", + "image_path": "363d59fd750d7001035634b09406a37e863cc3cb442efb4b51e2b8ec1dbc8857.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 44, + 672, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 672, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 672, + 301, + 748 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 44, + 672, + 301, + 748 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 44, + 672, + 301, + 748 + ], + "type": "text", + "content": " refers to the weight parameter. After obtaining the value score " + }, + { + "bbox": [ + 44, + 672, + 301, + 748 + ], + "type": "inline_equation", + "content": "p^f = (p_1^f, p_2^f, p_3^f, \\ldots, p_{N_v}^f) \\in \\mathbb{R}^{N_v}" + }, + { + "bbox": [ + 44, + 672, + 301, + 748 + ], + "type": "text", + "content": ", we convert it into a binary decision matrix " + }, + { + "bbox": [ + 44, + 672, + 301, + 748 + ], + "type": "inline_equation", + "content": "\\{0, 1\\}^{N_v}" + }, + { + "bbox": [ + 44, + 672, + 301, + 748 + ], + "type": "text", + "content": " to determine patch selection. This matrix is constructed using the Gumbel-Softmax technique [78], ensuring a smooth and differentiable sampling process. 
The Gumbel-Softmax matrix" + } + ] + } + ], + "index": 10 + }, + { + "type": "code", + "bbox": [ + 312, + 54, + 564, + 336 + ], + "blocks": [ + { + "bbox": [ + 310, + 40, + 556, + 53 + ], + "lines": [ + { + "bbox": [ + 310, + 40, + 556, + 53 + ], + "spans": [ + { + "bbox": [ + 310, + 40, + 556, + 53 + ], + "type": "text", + "content": "Algorithm 2 Linguistic-aware Semantic Alignment (LSA)" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "lines": [ + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "spans": [ + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "text", + "content": "1: procedure DYNAMIC PATCH SELECTION(V, S) \n2: Extract visual patches " + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "inline_equation", + "content": "V \\leftarrow \\mathrm{ViT}(V)" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "text", + "content": ", text tokens " + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "inline_equation", + "content": "S \\leftarrow" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "text", + "content": " TextEnc(S) \n3: Compute significance scores: " + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "inline_equation", + "content": "p_i^s \\leftarrow \\mathrm{MLP}(v_i)" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "inline_equation", + "content": "p_i^l \\leftarrow \\mathrm{Norm}(v_i S^\\top)" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "inline_equation", + "content": "p_i^e \\leftarrow \\mathrm{Norm}(v_i V^\\top)" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "text", + "content": " \n4: Fuse scores: " + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "inline_equation", + 
"content": "p_i^f \\leftarrow (1 - \\beta)p_i^s + \\frac{\\beta}{2}(p_i^l + p_i^e)" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "text", + "content": " \n5: Apply Gumbel-Softmax sampling to obtain binary mask " + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "inline_equation", + "content": "D \\in \\{0, 1\\}^{N_v}" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "text", + "content": " \n6: Return selected patches " + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "inline_equation", + "content": "V^p \\leftarrow \\{v_i | D_i = 1\\}" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "text", + "content": " \n7: end procedure \n8: procedure SEMANTIC PATCH CALIBRATION(" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "inline_equation", + "content": "V^p" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "text", + "content": ") \n9: Aggregate key patches: " + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "inline_equation", + "content": "\\tilde{V}^p \\leftarrow \\mathrm{Softmax}(\\mathrm{MLP}(V^p)) \\cdot V^p \\quad \\triangleright" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "text", + "content": " Adaptive weighting \n10: Fuse redundant patches: " + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "inline_equation", + "content": "\\tilde{v}^r \\leftarrow \\sum \\tilde{p}_i v_i \\quad \\triangleright" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "text", + "content": " Weighted sum via " + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "inline_equation", + "content": "p^f" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "text", + "content": " \n11: Return " + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "inline_equation", + "content": "\\tilde{V}^p \\leftarrow [v_{cls}; \\tilde{V}^p; \\tilde{v}^r]" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "text", + 
"content": " \n12: end procedure \n13: procedure PATCH-TOKEN ALIGNMENT(" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "inline_equation", + "content": "\\tilde{V}^p, S" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "text", + "content": ") \n14: Compute cosine similarity matrix " + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "inline_equation", + "content": "A \\in \\mathbb{R}^{(N_f + 2) \\times N_s}" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "text", + "content": " \n15: Calculate alignment score " + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "inline_equation", + "content": "K(V, S) \\leftarrow \\frac{1}{2} (\\text{mean}(\\text{max}_j A_{ij}) + \\text{mean}(\\text{max}_i A_{ij}))" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "text", + "content": " \n16: Optimize with " + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\text{align}} \\leftarrow \\text{Bi-directional Triplet Loss}(K(V, S), K(V, \\hat{S}), K(\\hat{V}, S))" + }, + { + "bbox": [ + 312, + 54, + 564, + 336 + ], + "type": "text", + "content": " \n17: end procedure" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "code_body" + } + ], + "index": 12, + "sub_type": "algorithm" + }, + { + "bbox": [ + 309, + 357, + 368, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 357, + 368, + 368 + ], + "spans": [ + { + "bbox": [ + 309, + 357, + 368, + 368 + ], + "type": "text", + "content": "is defined as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 348, + 372, + 564, + 401 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 348, + 372, + 564, + 401 + ], + "spans": [ + { + "bbox": [ + 348, + 372, + 564, + 401 + ], + "type": "interline_equation", + "content": "\\boldsymbol {M} _ {i, l} = \\frac {\\exp \\left(\\log \\left(\\boldsymbol {m} _ {i , l} + G _ {i , l}\\right) / 
\\tau\\right)}{\\sum_ {j = 1} ^ {L} \\exp \\left(\\log \\left(\\boldsymbol {m} _ {i , j} + G _ {i , j}\\right) / \\tau\\right)}, \\tag {7}", + "image_path": "bafa5c4b0108ccda78786190442994f1286a5fa457afc2bc498ac933f7e041e6.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 405, + 564, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 405, + 564, + 464 + ], + "spans": [ + { + "bbox": [ + 308, + 405, + 564, + 464 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 308, + 405, + 564, + 464 + ], + "type": "inline_equation", + "content": "M \\in \\mathbb{R}^{N_v \\times L}" + }, + { + "bbox": [ + 308, + 405, + 564, + 464 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 308, + 405, + 564, + 464 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 308, + 405, + 564, + 464 + ], + "type": "text", + "content": " indicates the total number of categories. In this scenario, " + }, + { + "bbox": [ + 308, + 405, + 564, + 464 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 308, + 405, + 564, + 464 + ], + "type": "text", + "content": " is set to 2 for the binary decision " + }, + { + "bbox": [ + 308, + 405, + 564, + 464 + ], + "type": "inline_equation", + "content": "(\\pmb{m}_{i,1} = p_i^f, \\pmb{m}_{i,2} = 1 - p_i^f)" + }, + { + "bbox": [ + 308, + 405, + 564, + 464 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 308, + 405, + 564, + 464 + ], + "type": "inline_equation", + "content": "G_i = -\\log (-\\log (U_i))" + }, + { + "bbox": [ + 308, + 405, + 564, + 464 + ], + "type": "text", + "content": " represents the Gumbel distribution, " + }, + { + "bbox": [ + 308, + 405, + 564, + 464 + ], + "type": "inline_equation", + "content": "U_i" + }, + { + "bbox": [ + 308, + 405, + 564, + 464 + ], + "type": "text", + "content": " refers to the uniform distribution and " + }, + { + "bbox": [ + 308, + 405, + 564, + 464 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 308, + 405, + 564, + 464 + ], + "type": "text", + "content": " is the temperature parameter." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 464, + 564, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 464, + 564, + 489 + ], + "spans": [ + { + "bbox": [ + 308, + 464, + 564, + 489 + ], + "type": "text", + "content": "Next, we obtain the differentiable decision matrix " + }, + { + "bbox": [ + 308, + 464, + 564, + 489 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 308, + 464, + 564, + 489 + ], + "type": "text", + "content": " by applying the arg-max on " + }, + { + "bbox": [ + 308, + 464, + 564, + 489 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 308, + 464, + 564, + 489 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 362, + 493, + 564, + 507 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 362, + 493, + 564, + 507 + ], + "spans": [ + { + "bbox": [ + 362, + 493, + 564, + 507 + ], + "type": "interline_equation", + "content": "\\boldsymbol {D} = \\operatorname {S a m p l i n g} (\\boldsymbol {M}) _ {*}, 1 \\in \\{0, 1 \\} ^ {N _ {v}}, \\tag {8}", + "image_path": "721f8b1ab55a13d424546f2c4625788b66bd13787b6b453dfc2177c5cb573b32.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 307, + 512, + 
564, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 512, + 564, + 582 + ], + "spans": [ + { + "bbox": [ + 307, + 512, + 564, + 582 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 307, + 512, + 564, + 582 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 307, + 512, + 564, + 582 + ], + "type": "text", + "content": " indicates patch selection outcomes: \"1\" for important patches and \"0\" for redundant ones. In the training stage, gradients are backpropagated through the differentiable decision matrix, enabling the dynamic selection of valuable visual patches via the score-sensitive prediction mechanism." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 309, + 592, + 484, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 592, + 484, + 605 + ], + "spans": [ + { + "bbox": [ + 309, + 592, + 484, + 605 + ], + "type": "text", + "content": "3.5.2 Semantic Patch Calibration(SPC)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 307, + 606, + 564, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 606, + 564, + 714 + ], + "spans": [ + { + "bbox": [ + 307, + 606, + 564, + 714 + ], + "type": "text", + "content": "This section aims to further refine the semantic representation of the selected valuable visual patches. After dynamically selecting important visual patches guided by linguistic supervision, we designate them as " + }, + { + "bbox": [ + 307, + 606, + 564, + 714 + ], + "type": "inline_equation", + "content": "V^{p} = \\left(v_{1}^{p}, v_{2}^{p}, \\ldots, v_{N_{p}}^{p}\\right) \\in \\mathbb{R}^{N_{p} \\times d}" + }, + { + "bbox": [ + 307, + 606, + 564, + 714 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 307, + 606, + 564, + 714 + ], + "type": "inline_equation", + "content": "N_{p}" + }, + { + "bbox": [ + 307, + 606, + 564, + 714 + ], + "type": "text", + "content": " is the number of selected valuable visual patches. We employ an aggregation network [79] to model multiple aggregation weights and combine the selected " + }, + { + "bbox": [ + 307, + 606, + 564, + 714 + ], + "type": "inline_equation", + "content": "N_{p}" + }, + { + "bbox": [ + 307, + 606, + 564, + 714 + ], + "type": "text", + "content": " visual patches to generate " + }, + { + "bbox": [ + 307, + 606, + 564, + 714 + ], + "type": "inline_equation", + "content": "N_{f}" + }, + { + "bbox": [ + 307, + 606, + 564, + 714 + ], + "type": "text", + "content": " informative visual features:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 352, + 717, + 564, + 750 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 352, + 717, + 564, + 750 + ], + "spans": [ + { + "bbox": [ + 352, + 717, + 564, + 750 + ], + "type": "interline_equation", + "content": "\\tilde {\\boldsymbol {v}} _ {j} ^ {p} = \\sum_ {i = 1} ^ {N _ {p}} (\\boldsymbol {W}) _ {i j} \\cdot \\boldsymbol {v} _ {i} ^ {p}, \\quad j = [ 1, \\dots , N _ {f} ], \\tag {9}", + "image_path": "3beda8342affcae39beb96594ec67f80b0c0dc46803261013741b2be59f83ad0.jpg" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. 
XX, XXXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 111, + 41, + 299, + 56 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 41, + 299, + 56 + ], + "spans": [ + { + "bbox": [ + 111, + 41, + 299, + 56 + ], + "type": "interline_equation", + "content": "\\boldsymbol {W} = \\operatorname {s o f t m a x} \\left(\\mathbf {M L P} \\left(\\boldsymbol {V} ^ {p}\\right)\\right), \\tag {10}", + "image_path": "1b73e9a868e9432fecdd3f367dde5505d76e4a2a2988557afd4f0a3186eaaae7.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 60, + 300, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 60, + 300, + 153 + ], + "spans": [ + { + "bbox": [ + 44, + 60, + 300, + 153 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 44, + 60, + 300, + 153 + ], + "type": "inline_equation", + "content": "(\\mathbf{W})" + }, + { + "bbox": [ + 44, + 60, + 300, + 153 + ], + "type": "text", + "content": " denotes the normalized weight matrix and " + }, + { + "bbox": [ + 44, + 60, + 300, + 153 + ], + "type": "inline_equation", + "content": "\\sum_{i=1}^{N_s} (\\mathbf{W})_{ij} = 1" + }, + { + "bbox": [ + 44, + 60, + 300, + 153 + ], + "type": "text", + "content": ". 
" + }, + { + "bbox": [ + 44, + 60, + 300, + 153 + ], + "type": "inline_equation", + "content": "N_f" + }, + { + "bbox": [ + 44, + 60, + 300, + 153 + ], + "type": "text", + "content": " is the number of aggregated patches " + }, + { + "bbox": [ + 44, + 60, + 300, + 153 + ], + "type": "inline_equation", + "content": "(N_f < N_p)" + }, + { + "bbox": [ + 44, + 60, + 300, + 153 + ], + "type": "text", + "content": ". The aggregation network adaptively combines visually similar patches and is differentiable for end-to-end training. While redundant visual patches can be discarded, they may contain supplementary semantic features for refined cross-modal alignment. Therefore, we fuse them into a single patch:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 78, + 159, + 300, + 198 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 159, + 300, + 198 + ], + "spans": [ + { + "bbox": [ + 78, + 159, + 300, + 198 + ], + "type": "interline_equation", + "content": "\\tilde {\\boldsymbol {v}} ^ {r} = \\sum_ {i \\in \\mathcal {N}} \\tilde {p} _ {i} \\cdot \\boldsymbol {v} _ {i}, \\quad \\tilde {p} _ {i} = \\frac {\\exp \\left(p _ {i} ^ {f}\\right) \\boldsymbol {D} _ {i}}{\\sum_ {i = 1} ^ {N} \\exp \\left(p _ {i} ^ {f}\\right) \\boldsymbol {D} _ {i}}, \\tag {11}", + "image_path": "8153f2c7b7698240bccfa3072675717b491e1e93395137231384633f5325fe8a.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 201, + 301, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 201, + 301, + 255 + ], + "spans": [ + { + "bbox": [ + 44, + 201, + 301, + 255 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 44, + 201, + 301, + 255 + ], + "type": "inline_equation", + "content": "\\mathcal{N}" + }, + { + "bbox": [ + 44, + 201, + 301, + 255 + ], + "type": "text", + "content": " represents the set for redundant visual patches. 
" + }, + { + "bbox": [ + 44, + 201, + 301, + 255 + ], + "type": "inline_equation", + "content": "\\tilde{p}_i" + }, + { + "bbox": [ + 44, + 201, + 301, + 255 + ], + "type": "text", + "content": " denotes the normalized score of the value score " + }, + { + "bbox": [ + 44, + 201, + 301, + 255 + ], + "type": "inline_equation", + "content": "p_i^f" + }, + { + "bbox": [ + 44, + 201, + 301, + 255 + ], + "type": "text", + "content": ". Finally, this component models the calibrated refined visual patches, denoted as " + }, + { + "bbox": [ + 44, + 201, + 301, + 255 + ], + "type": "inline_equation", + "content": "\\tilde{V}^p = (v_{cls},\\tilde{v}_1^p,\\tilde{v}_2^p,\\dots ,\\tilde{v}_{N_f}^p,\\tilde{v}^r)\\in \\mathbb{R}^{(N_f + 2)\\times d}" + }, + { + "bbox": [ + 44, + 201, + 301, + 255 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 45, + 262, + 198, + 275 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 262, + 198, + 275 + ], + "spans": [ + { + "bbox": [ + 45, + 262, + 198, + 275 + ], + "type": "text", + "content": "3.5.3 Patch-token Alignment(PTA)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "spans": [ + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "text", + "content": "This module aims to achieve the fine-grained patch-token level alignment. 
Specifically, we first utilize the refined visual patches " + }, + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "inline_equation", + "content": "\\tilde{V}^p" + }, + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "text", + "content": " and linguistic features " + }, + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "text", + "content": " to compute tokenwise similarities, producing a patch-token similarity matrix " + }, + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "inline_equation", + "content": "A\\in \\mathbb{R}^{(N_f + 2)\\times N_s}" + }, + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "text", + "content": ". " + }, + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "inline_equation", + "content": "(A)_{ij} = \\frac{(\\tilde{v}_i)^T s_j}{\\|\\tilde{v}_i\\| \\|s_j\\|}" + }, + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "text", + "content": " denotes the patch-token level alignment score between the " + }, + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "text", + "content": "-th visual patch and the " + }, + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "text", + "content": "-th word. Subsequently, maximum-correspondence interaction is introduced to aggregate cross-modal alignment. 
For each visual patch (or token), we identify the most aligned textual token (or patch) and calculate the average alignment score " + }, + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "inline_equation", + "content": "K(V,S)" + }, + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "text", + "content": ", representing the overall alignment between the image " + }, + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "text", + "content": " and the sentence " + }, + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 44, + 277, + 300, + 419 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 425, + 299, + 467 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 425, + 299, + 467 + ], + "spans": [ + { + "bbox": [ + 55, + 425, + 299, + 467 + ], + "type": "interline_equation", + "content": "K (V, S) = \\frac {1}{N _ {f} + 2} \\sum_ {i = 1} ^ {N _ {f} + 2} \\max _ {j} (\\boldsymbol {A}) _ {i j} + \\frac {1}{N _ {s}} \\sum_ {j = 1} ^ {N _ {s}} \\max _ {i} (\\boldsymbol {A}) _ {i j} \\tag {12}", + "image_path": "f4df37f89c13fdd7683da8ec9fb4436a07840bfc250550b2ef885ed85a4637ca.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 44, + 468, + 299, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 468, + 299, + 491 + ], + "spans": [ + { + "bbox": [ + 44, + 468, + 299, + 491 + ], + "type": "text", + "content": "Following a previous method [80], the bi-direction triplet loss with hard negative mining is exploited:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 87, + 496, + 299, + 538 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 87, + 496, + 299, + 538 + ], + "spans": [ + { + "bbox": [ + 87, + 496, + 299, + 538 + ], + "type": 
"interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\text {a l i g n}} = \\sum_ {(V, S)} [ \\gamma - K (V, S) + K (V, \\hat {S}) ] _ {+} \\tag {13} \\\\ + [ \\gamma - K (V, S) + K (\\hat {V}, S) ] _ {+}, \\\\ \\end{array}", + "image_path": "2bca49dd24d43fc768b5a0cdf5379321f0e4de3e014e9c8946bdc65af1ffdd8d.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 44, + 543, + 300, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 543, + 300, + 602 + ], + "spans": [ + { + "bbox": [ + 44, + 543, + 300, + 602 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 44, + 543, + 300, + 602 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 44, + 543, + 300, + 602 + ], + "type": "text", + "content": " is the trade-off parameter. " + }, + { + "bbox": [ + 44, + 543, + 300, + 602 + ], + "type": "inline_equation", + "content": "[x]_{+} = \\max (x,0)" + }, + { + "bbox": [ + 44, + 543, + 300, + 602 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 44, + 543, + 300, + 602 + ], + "type": "inline_equation", + "content": "(V,S)" + }, + { + "bbox": [ + 44, + 543, + 300, + 602 + ], + "type": "text", + "content": " refers to a positive image-text pair in the mini-batch. Moreover, " + }, + { + "bbox": [ + 44, + 543, + 300, + 602 + ], + "type": "inline_equation", + "content": "\\hat{S} = \\operatorname{argmax}_{j\\neq S}K(V,j)" + }, + { + "bbox": [ + 44, + 543, + 300, + 602 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 44, + 543, + 300, + 602 + ], + "type": "inline_equation", + "content": "\\hat{V} = \\operatorname{argmax}_{i\\neq V}K(i,V)" + }, + { + "bbox": [ + 44, + 543, + 300, + 602 + ], + "type": "text", + "content": " indicate the hardest negative sentence and visual examples within a mini-batch, respectively." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 44, + 616, + 187, + 628 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 616, + 187, + 628 + ], + "spans": [ + { + "bbox": [ + 44, + 616, + 187, + 628 + ], + "type": "text", + "content": "3.6 Rationale-aware Learning" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 44, + 631, + 300, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 631, + 300, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 631, + 300, + 746 + ], + "type": "text", + "content": "To endow the model with the ability to perform semantic causality and impression reasoning, we propose a rationale-aware learning framework designed to fine-tune a sequence-to-sequence (seq2seq) model. This seq2seq model is proposed to achieve three task objectives for each specific target within the image-text pair: sentiment classification (SC), semantic rationale generation (SRG), and impression rationale generation (IRG). These tasks are differentiated by the use of distinct input configurations and input content. For SC, the decoder outputs only the predicted sentiment" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 42, + 563, + 65 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 563, + 65 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 563, + 65 + ], + "type": "text", + "content": "polarity. In SRG and IRG, the decoder produces the corresponding rationale and the sentiment prediction." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 308, + 65, + 564, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 65, + 564, + 180 + ], + "spans": [ + { + "bbox": [ + 308, + 65, + 564, + 180 + ], + "type": "text", + "content": "Specifically, our input comprises the textual sentence " + }, + { + "bbox": [ + 308, + 65, + 564, + 180 + ], + "type": "inline_equation", + "content": "S = (s_{1}, s_{2}, \\ldots, s_{N_{s}})" + }, + { + "bbox": [ + 308, + 65, + 564, + 180 + ], + "type": "text", + "content": ", the overall aesthetic caption of the image " + }, + { + "bbox": [ + 308, + 65, + 564, + 180 + ], + "type": "inline_equation", + "content": "A^{c} = (a_{1}^{c}, a_{2}^{c}, \\ldots, a_{N_{c}}^{c})" + }, + { + "bbox": [ + 308, + 65, + 564, + 180 + ], + "type": "text", + "content": ", the object-level description " + }, + { + "bbox": [ + 308, + 65, + 564, + 180 + ], + "type": "inline_equation", + "content": "A^{o} = (a_{1}^{o}, a_{2}^{o}, \\ldots, a_{N_{o}}^{o})" + }, + { + "bbox": [ + 308, + 65, + 564, + 180 + ], + "type": "text", + "content": ", which pertains to either facial or aesthetic attributes and the specific target " + }, + { + "bbox": [ + 308, + 65, + 564, + 180 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 308, + 65, + 564, + 180 + ], + "type": "text", + "content": ". The input format is determined by the presence of the specific target within the visual content. For example, if the specific target is identified in the image, based on the annotations provided by Wang et al. 
[25], the input for SC, SRG, and IRG is defined as follows:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 370, + 186, + 564, + 200 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 186, + 564, + 200 + ], + "spans": [ + { + "bbox": [ + 370, + 186, + 564, + 200 + ], + "type": "interline_equation", + "content": "H ^ {\\mathrm {s c}} = \\operatorname {e n c o d e r} \\left(t _ {\\langle \\mathrm {s c} \\rangle}, A ^ {c}, S, T\\right), \\tag {14}", + "image_path": "dcdaa3880792ddf7cb03bb81958c3d68e202fe3298dacecb086cce79a93b452b.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 367, + 205, + 563, + 218 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 205, + 563, + 218 + ], + "spans": [ + { + "bbox": [ + 367, + 205, + 563, + 218 + ], + "type": "interline_equation", + "content": "H ^ {\\mathrm {s r g}} = \\operatorname {e n c o d e r} \\left(t _ {\\langle \\mathrm {s r g} \\rangle}, A ^ {c}, S, T\\right), \\tag {15}", + "image_path": "41df244fe389a53760c5227fe3f128250aad87fd1cfc796344b3c7c986c79c10.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 367, + 224, + 563, + 237 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 224, + 563, + 237 + ], + "spans": [ + { + "bbox": [ + 367, + 224, + 563, + 237 + ], + "type": "interline_equation", + "content": "H ^ {\\text {i r g}} = \\operatorname {e n c o d e r} \\left(t _ {\\langle \\mathrm {i r g} \\rangle}, A ^ {c}, S, T\\right), \\tag {16}", + "image_path": "5acc1392ca7e83c57baaccf4589d6ac105a595c6d177ff1ca2d628f3c06f7d50.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 307, + 243, + 564, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 243, + 564, + 370 + ], + "spans": [ + { + "bbox": [ + 307, + 243, + 564, + 370 + ], + "type": "text", + "content": "where encoder " + }, + { + "bbox": [ + 307, + 243, + 564, + 370 + ], + "type": 
"inline_equation", + "content": "(\\cdot)" + }, + { + "bbox": [ + 307, + 243, + 564, + 370 + ], + "type": "text", + "content": " is the Transformer encoder of the seq2seq model. The tokens " + }, + { + "bbox": [ + 307, + 243, + 564, + 370 + ], + "type": "inline_equation", + "content": "t_{\\langle \\mathrm{sc}\\rangle}, t_{\\langle \\mathrm{src}\\rangle}," + }, + { + "bbox": [ + 307, + 243, + 564, + 370 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 307, + 243, + 564, + 370 + ], + "type": "inline_equation", + "content": "t_{\\langle \\mathrm{irg}\\rangle}" + }, + { + "bbox": [ + 307, + 243, + 564, + 370 + ], + "type": "text", + "content": " are specialized tokens designed to represent distinct tasks. Although the specific aspects are not present in the image, this does not imply that sentimental cues from the image have no impact on predicting the sentiment polarity. On the contrary, incorporating sentiment cues from the holistic image can provide valuable insights into the influence of image aesthetic attributes on the sentiment prediction for the specific aspect. 
For samples where specific targets are present in the visual content, the input format is structured as follows:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 370, + 376, + 563, + 389 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 376, + 563, + 389 + ], + "spans": [ + { + "bbox": [ + 370, + 376, + 563, + 389 + ], + "type": "interline_equation", + "content": "H ^ {\\mathrm {s c}} = \\operatorname {e n c o d e r} \\left(t _ {\\langle \\mathrm {s c} \\rangle}, S, A ^ {o}, T\\right), \\tag {17}", + "image_path": "d87cc80f17376d53aa18a021c3dfd53bfc4ebdbcdf4505f4d2c7b8fd1d21d69f.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 367, + 394, + 563, + 407 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 394, + 563, + 407 + ], + "spans": [ + { + "bbox": [ + 367, + 394, + 563, + 407 + ], + "type": "interline_equation", + "content": "H ^ {\\mathrm {s r g}} = \\operatorname {e n c o d e r} \\left(t _ {\\left(\\mathrm {s r g}\\right)}, S, A ^ {o}, T\\right), \\tag {18}", + "image_path": "c3a55e0473720311e10e2d4c402da74c36c834e399f554389210047f51a67319.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 367, + 413, + 563, + 426 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 413, + 563, + 426 + ], + "spans": [ + { + "bbox": [ + 367, + 413, + 563, + 426 + ], + "type": "interline_equation", + "content": "H ^ {\\text {i r g}} = \\operatorname {e n c o d e r} \\left(t _ {\\langle \\text {i r g} \\rangle}, S, A ^ {o}, T\\right). 
\\tag {19}", + "image_path": "8574a8298aaec88834cdef610089c43a8221f47fe50653661c4c37844e56958f.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 308, + 431, + 564, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 431, + 564, + 570 + ], + "spans": [ + { + "bbox": [ + 308, + 431, + 564, + 570 + ], + "type": "text", + "content": "We employ fine-grained, object-level emotion-laden descriptions to establish alignment between specific targets and their corresponding objects in the image, which enhances both the accuracy and interpretability of the sentiment prediction process. Subsequently, these hidden features are passed through a stack of self-attention-based encoders, which dynamically fuse representations and model both intra-modal and cross-modal interactions. Finally, the decoder produces task-specific outputs. For Sentiment Classification (SC), the decoder generates the predicted sentiment polarity, selecting from \"positive,\" \"negative,\" or \"neutral,\" denoted as " + }, + { + "bbox": [ + 308, + 431, + 564, + 570 + ], + "type": "inline_equation", + "content": "\\hat{y}_{sc}" + }, + { + "bbox": [ + 308, + 431, + 564, + 570 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 384, + 575, + 563, + 590 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 384, + 575, + 563, + 590 + ], + "spans": [ + { + "bbox": [ + 384, + 575, + 563, + 590 + ], + "type": "interline_equation", + "content": "G ^ {\\mathrm {s c}} = \\left[ \\langle \\mathrm {s e n} \\rangle \\hat {y} ^ {\\mathrm {s c}} \\langle / \\mathrm {s e n} \\rangle \\right], \\tag {20}", + "image_path": "774a10219b19470c8fdfff9b2a0529c3851b267b0655c78fe280b8d6df1757fd.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 307, + 594, + 564, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 594, + 564, + 665 + ], + "spans": [ + { + "bbox": [ + 307, + 594, + 564, + 665 
+ ], + "type": "text", + "content": "where the special tokens " + }, + { + "bbox": [ + 307, + 594, + 564, + 665 + ], + "type": "inline_equation", + "content": "\\langle \\mathrm{sen}\\rangle" + }, + { + "bbox": [ + 307, + 594, + 564, + 665 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 307, + 594, + 564, + 665 + ], + "type": "inline_equation", + "content": "\\langle / \\mathrm{sen}\\rangle" + }, + { + "bbox": [ + 307, + 594, + 564, + 665 + ], + "type": "text", + "content": " are denoted as the start and end markers for SC predictors. For the two additional rationale generation tasks SRG and IRG, the decoder generates not only the semantic rationale " + }, + { + "bbox": [ + 307, + 594, + 564, + 665 + ], + "type": "inline_equation", + "content": "\\hat{s}r" + }, + { + "bbox": [ + 307, + 594, + 564, + 665 + ], + "type": "text", + "content": " and impression rationale " + }, + { + "bbox": [ + 307, + 594, + 564, + 665 + ], + "type": "inline_equation", + "content": "\\hat{i}r" + }, + { + "bbox": [ + 307, + 594, + 564, + 665 + ], + "type": "text", + "content": " for the specific target but also their corresponding sentiment predictions " + }, + { + "bbox": [ + 307, + 594, + 564, + 665 + ], + "type": "inline_equation", + "content": "\\hat{y}_{sr}" + }, + { + "bbox": [ + 307, + 594, + 564, + 665 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 307, + 594, + 564, + 665 + ], + "type": "inline_equation", + "content": "\\hat{y}_{si}" + }, + { + "bbox": [ + 307, + 594, + 564, + 665 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 362, + 670, + 563, + 685 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 362, + 670, + 563, + 685 + ], + "spans": [ + { + "bbox": [ + 362, + 670, + 563, + 685 + ], + "type": "interline_equation", + "content": "G ^ {\\mathrm {s r}} = \\left[ \\langle \\mathrm {s r} \\rangle \\hat {s} r \\langle / \\mathrm {s r} \\rangle \\langle \\mathrm 
{s e n} \\rangle \\hat {y} ^ {\\mathrm {s r}} \\langle / \\mathrm {s e n} \\rangle \\right], \\tag {21}", + "image_path": "6f0965427bb1d8fa056516e4f55ca9a310014c9c0542e0231438d95c04190203.jpg" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 363, + 687, + 563, + 707 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 363, + 687, + 563, + 707 + ], + "spans": [ + { + "bbox": [ + 363, + 687, + 563, + 707 + ], + "type": "interline_equation", + "content": "G ^ {\\mathrm {i r}} = \\left[ \\langle \\mathrm {i r} \\rangle \\hat {i r} \\langle / \\mathrm {i r} \\rangle \\langle \\mathrm {s e n} \\rangle \\hat {y} ^ {\\mathrm {i r}} \\langle / \\mathrm {s e n} \\rangle \\right], \\tag {22}", + "image_path": "27ab10d2e20a003a634285c4bb0ee585657ffcd96585edb897c6af55a29b1391.jpg" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 307, + 711, + 564, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 711, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 307, + 711, + 564, + 746 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 307, + 711, + 564, + 746 + ], + "type": "inline_equation", + "content": "\\langle \\mathrm{sr}\\rangle" + }, + { + "bbox": [ + 307, + 711, + 564, + 746 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 307, + 711, + 564, + 746 + ], + "type": "inline_equation", + "content": "\\langle / \\mathrm{sr}\\rangle" + }, + { + "bbox": [ + 307, + 711, + 564, + 746 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 307, + 711, + 564, + 746 + ], + "type": "inline_equation", + "content": "\\langle \\mathrm{ir}\\rangle" + }, + { + "bbox": [ + 307, + 711, + 564, + 746 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 307, + 711, + 564, + 746 + ], + "type": "inline_equation", + "content": "\\langle / \\mathrm{ir}\\rangle" + }, + { + "bbox": [ + 307, + 711, + 564, + 746 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 307, + 711, + 
564, + 746 + ], + "type": "inline_equation", + "content": "\\langle \\mathrm{sen}\\rangle" + }, + { + "bbox": [ + 307, + 711, + 564, + 746 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 307, + 711, + 564, + 746 + ], + "type": "inline_equation", + "content": "\\langle / \\mathrm{sen}\\rangle" + }, + { + "bbox": [ + 307, + 711, + 564, + 746 + ], + "type": "text", + "content": " serve as specialized markers to delineate the rationale and sentiment polarity. Finally, the input sequence is uniformly denoted" + } + ] + } + ], + "index": 28 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 42, + 301, + 78 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 42, + 301, + 78 + ], + "spans": [ + { + "bbox": [ + 45, + 42, + 301, + 78 + ], + "type": "text", + "content": "as " + }, + { + "bbox": [ + 45, + 42, + 301, + 78 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 45, + 42, + 301, + 78 + ], + "type": "text", + "content": ", and the generated textual content is represented as " + }, + { + "bbox": [ + 45, + 42, + 301, + 78 + ], + "type": "inline_equation", + "content": "Z = \\{z_{1}, z_{2}, \\ldots, z_{N_{z}}\\}" + }, + { + "bbox": [ + 45, + 42, + 301, + 78 + ], + "type": "text", + "content": ". 
Consequently, the loss function for the generation process is formulated as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 78, + 83, + 299, + 114 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 83, + 299, + 114 + ], + "spans": [ + { + "bbox": [ + 78, + 83, + 299, + 114 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {Z} = - \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\sum_ {n _ {z} = 1} ^ {N _ {z}} \\log P \\left(z _ {i, n _ {z}} \\mid \\hat {z} _ {i, < n _ {z}}, X\\right), \\tag {23}", + "image_path": "3b4e213ad8da186c26e88ec593995e95a76be8a8184aef3a371646cd07e7d8ae.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "spans": [ + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "inline_equation", + "content": "z_{i,n_z}" + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "text", + "content": " is the ground truth token at position " + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "inline_equation", + "content": "n_z" + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "text", + "content": " for sample " + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "inline_equation", + "content": "\\hat{z}_{i, < n_z}" + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "text", + "content": " represents the generated sequence up to position " + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "inline_equation", + "content": "n_z - 1" + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "text", + "content": " for sample " + }, + { + 
"bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "inline_equation", + "content": "P(z_{i,n_z} \\mid \\hat{z}_{i, < n_z}, X)" + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "text", + "content": " denotes the probability of generating token " + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "inline_equation", + "content": "z_{i,n_z}" + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "text", + "content": " conditioned on " + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "inline_equation", + "content": "\\hat{z}_{i, < n_z}" + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "inline_equation", + "content": "X" + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "text", + "content": ". In this rationale-aware learning framework, since all objectives are formulated as generative tasks, the loss functions " + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{SC}" + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{SRG}" + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{IRG}" + }, + { + "bbox": [ + 44, + 119, + 301, + 224 + ], + "type": "text", + "content": " are all employ the generative loss function, E.q. 23. 
Therefore, the objective function in the proposed method is formulated as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 228, + 299, + 251 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 228, + 299, + 251 + ], + "spans": [ + { + "bbox": [ + 56, + 228, + 299, + 251 + ], + "type": "interline_equation", + "content": "\\mathcal {L} = \\alpha \\mathcal {L} _ {\\mathrm {S C}} + \\frac {1 - \\alpha}{2} \\mathcal {L} _ {\\mathrm {S R G}} + \\frac {1 - \\alpha}{2} \\mathcal {L} _ {\\mathrm {I R G}} + \\lambda \\mathcal {L} _ {\\text {a l i g n}}, \\tag {24}", + "image_path": "3446df51ba868913d3f997aeafdb9b807e000ab1914dcdc123d1ab6bca2ee06f.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 255, + 301, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 255, + 301, + 291 + ], + "spans": [ + { + "bbox": [ + 44, + 255, + 301, + 291 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 44, + 255, + 301, + 291 + ], + "type": "inline_equation", + "content": "\\alpha, \\lambda \\in (0,1)" + }, + { + "bbox": [ + 44, + 255, + 301, + 291 + ], + "type": "text", + "content": " are tradeoff hyperparameters that regulate the relative contributions of each generative loss and the patch-token alignment." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 45, + 308, + 151, + 320 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 308, + 151, + 320 + ], + "spans": [ + { + "bbox": [ + 45, + 308, + 151, + 320 + ], + "type": "text", + "content": "4 EXPERIMENTS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 44, + 325, + 301, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 325, + 301, + 396 + ], + "spans": [ + { + "bbox": [ + 44, + 325, + 301, + 396 + ], + "type": "text", + "content": "In this section, we provide a comprehensive description of the experimental settings and evaluate the proposed method on three publicly available MASC datasets, benchmarking it against state-of-the-art methods. Furthermore, we perform an extensive series of studies to thoroughly analyze the effectiveness of the proposed approach." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 45, + 412, + 171, + 425 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 412, + 171, + 425 + ], + "spans": [ + { + "bbox": [ + 45, + 412, + 171, + 425 + ], + "type": "text", + "content": "4.1 Experimental Settings" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 45, + 427, + 117, + 439 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 427, + 117, + 439 + ], + "spans": [ + { + "bbox": [ + 45, + 427, + 117, + 439 + ], + "type": "text", + "content": "4.1.1 Datasets" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 44, + 442, + 301, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 442, + 301, + 594 + ], + "spans": [ + { + "bbox": [ + 44, + 442, + 301, + 594 + ], + "type": "text", + "content": "We utilize three widely recognized benchmark datasets for MASC [11], [81]: Twitter-2015, Twitter-2017, and the Political Twitter dataset. 
Each sample within these datasets comprises a user-generated multimodal image-text pair, including an image, a textual sentence, and one or more specific targets. Each aspect is annotated with a sentiment label from the set Positive, Negative, Neutral. The detailed statistics of these datasets are presented in Table 3. Furthermore, we incorporate semantic rationale (SR), impression rationale (IR), aesthetic captions for the entire image (AC), facial descriptions (FD), and aesthetic captions for objects (AO) for each data point. The maximum length for facial descriptions and aesthetic captions is constrained to 50 tokens." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 45, + 605, + 176, + 616 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 605, + 176, + 616 + ], + "spans": [ + { + "bbox": [ + 45, + 605, + 176, + 616 + ], + "type": "text", + "content": "4.1.2 Implementation Details" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 44, + 619, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 619, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 619, + 301, + 748 + ], + "type": "text", + "content": "We adopt the seq2seq model Flan-T5 [82] as the backbone of our generative framework. Specifically, the model is trained for 10 epochs using the AdamW optimizer [83], with a batch size of 4. A grid search is performed on the development set to determine the optimal learning rate, " + }, + { + "bbox": [ + 44, + 619, + 301, + 748 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 44, + 619, + 301, + 748 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 44, + 619, + 301, + 748 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 44, + 619, + 301, + 748 + ], + "type": "text", + "content": " for Flan-T5 across the three datasets. 
The selected values for learning rate are " + }, + { + "bbox": [ + 44, + 619, + 301, + 748 + ], + "type": "inline_equation", + "content": "3e - 4" + }, + { + "bbox": [ + 44, + 619, + 301, + 748 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 44, + 619, + 301, + 748 + ], + "type": "inline_equation", + "content": "3e - 4" + }, + { + "bbox": [ + 44, + 619, + 301, + 748 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 44, + 619, + 301, + 748 + ], + "type": "inline_equation", + "content": "1e - 4" + }, + { + "bbox": [ + 44, + 619, + 301, + 748 + ], + "type": "text", + "content": ", respectively, for the Twitter-2015, Twitter-2017 and Political Twitter. The trade-off hyperparameter sets " + }, + { + "bbox": [ + 44, + 619, + 301, + 748 + ], + "type": "inline_equation", + "content": "(\\alpha" + }, + { + "bbox": [ + 44, + 619, + 301, + 748 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 44, + 619, + 301, + 748 + ], + "type": "inline_equation", + "content": "\\lambda)" + }, + { + "bbox": [ + 44, + 619, + 301, + 748 + ], + "type": "text", + "content": " are 0.2, 0.1, 0.2 and 0.2, 0.5, 0.5, respectively, for the Twitter-2015, Twitter-2017 and Political Twitter. Consistent with prior research on MASC [11], [24]," + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 42, + 565, + 89 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 42, + 565, + 89 + ], + "spans": [ + { + "bbox": [ + 308, + 42, + 565, + 89 + ], + "type": "text", + "content": "we employ Accuracy (Acc) and F1 score (F1) as the evaluation metrics. The model is implemented using PyTorch, and experiments are conducted on an NVIDIA V100 GPU with 30 GB of memory." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 309, + 102, + 430, + 114 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 102, + 430, + 114 + ], + "spans": [ + { + "bbox": [ + 309, + 102, + 430, + 114 + ], + "type": "text", + "content": "4.2 Compared Baselines" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 308, + 118, + 565, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 118, + 565, + 165 + ], + "spans": [ + { + "bbox": [ + 308, + 118, + 565, + 165 + ], + "type": "text", + "content": "We conducted a comprehensive comparative evaluation of the proposed method against a range of robust baseline approaches, which are classified into three categories. The first category consists of image-only methods:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 323, + 170, + 564, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 170, + 564, + 205 + ], + "spans": [ + { + "bbox": [ + 323, + 170, + 564, + 205 + ], + "type": "text", + "content": "- Res-Target [84] leverages ResNet as its backbone to extract visual features exclusively for predicting the sentiment of the specified target." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 309, + 210, + 530, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 210, + 530, + 223 + ], + "spans": [ + { + "bbox": [ + 309, + 210, + 530, + 223 + ], + "type": "text", + "content": "The second category includes text-only approaches:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 323, + 227, + 564, + 331 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 323, + 227, + 564, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 227, + 564, + 260 + ], + "spans": [ + { + "bbox": [ + 323, + 227, + 564, + 260 + ], + "type": "text", + "content": "- MemNet [85] employs a stacked architecture of multiple memory networks to build deep memory networks." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 323, + 262, + 564, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 262, + 564, + 297 + ], + "spans": [ + { + "bbox": [ + 323, + 262, + 564, + 297 + ], + "type": "text", + "content": "- MGAN [86] is based on a multi-grained attention architecture designed to adaptively capture both coarse-grained and fine-grained interactions." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 323, + 297, + 564, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 297, + 564, + 331 + ], + "spans": [ + { + "bbox": [ + 323, + 297, + 564, + 331 + ], + "type": "text", + "content": "- BERT [87] is a powerful pre-trained language model trained using a masked language modeling objective and next sentence prediction." 
+ } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 309, + 337, + 564, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 337, + 564, + 361 + ], + "spans": [ + { + "bbox": [ + 309, + 337, + 564, + 361 + ], + "type": "text", + "content": "Finally, this study incorporates the following advanced image-text multimodal approaches:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 323, + 365, + 564, + 747 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 323, + 365, + 564, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 365, + 564, + 423 + ], + "spans": [ + { + "bbox": [ + 323, + 365, + 564, + 423 + ], + "type": "text", + "content": "- MIMN [88] comprises two customized interactive memory networks designed to capture both inter-modal dynamics between different modalities and intra-modal dynamics within each individual modality." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 323, + 423, + 564, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 423, + 564, + 469 + ], + "spans": [ + { + "bbox": [ + 323, + 423, + 564, + 469 + ], + "type": "text", + "content": "- ESAFN [12] is a target-sensitive interaction and fusion network designed to adaptively capture interactive features across modalities while also modeling intra-modality features." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 323, + 469, + 564, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 469, + 564, + 515 + ], + "spans": [ + { + "bbox": [ + 323, + 469, + 564, + 515 + ], + "type": "text", + "content": "- TomBERT [11] utilizes BERT and ResNet as backbone models for encoding textual and visual content, respectively. Cross-modal fusion is accomplished by integrating these features into a BERT encoder." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 323, + 515, + 564, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 515, + 564, + 560 + ], + "spans": [ + { + "bbox": [ + 323, + 515, + 564, + 560 + ], + "type": "text", + "content": "- JML-MASC [44] jointly extracts the specific targets and identifies their sentiment polarity by utilizing a visual de-nosing mechanism and attention-based fusion framework." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 323, + 562, + 564, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 562, + 564, + 608 + ], + "spans": [ + { + "bbox": [ + 323, + 562, + 564, + 608 + ], + "type": "text", + "content": "- EF-CapTrBERT [17] converts visual content into an auxiliary sentence, which is then combined with the input sentence and processed through a BERT encoder for sentiment prediction." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 323, + 608, + 564, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 608, + 564, + 642 + ], + "spans": [ + { + "bbox": [ + 323, + 608, + 564, + 642 + ], + "type": "text", + "content": "- VLP-MABSA [14] is a task-specific pre-trained generative framework for multimodal aspect-based sentiment analysis, built on the BART architecture." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 323, + 642, + 564, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 642, + 564, + 688 + ], + "spans": [ + { + "bbox": [ + 323, + 642, + 564, + 688 + ], + "type": "text", + "content": "- FITE [23] is a translation-based approach, which captures facial features in the image and translates them into a corresponding facial description as an auxiliary sentence for sentiment classification." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 323, + 689, + 564, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 689, + 564, + 747 + ], + "spans": [ + { + "bbox": [ + 323, + 689, + 564, + 747 + ], + "type": "text", + "content": "- CMMT-MASC [15] is a cross-modal multi-task Transformer designed for MASC. Additionally, it employs multimodal gating mechanisms to dynamically regulate the flow of textual and visual information during interactions." + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 317, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 317, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 317, + 35 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 95, + 100, + 513, + 299 + ], + "blocks": [ + { + "bbox": [ + 48, + 44, + 560, + 90 + ], + "lines": [ + { + "bbox": [ + 48, + 44, + 560, + 90 + ], + "spans": [ + { + "bbox": [ + 48, + 44, + 560, + 90 + ], + "type": "text", + "content": "TABLE 3 Detailed Statistics of Twitter-2015, Twitter-2017, and Political Twitter datasets. The \"#sentence\" refers to the total number of sentences. \"#Avg. Length\" denotes the average length of sentences, while \"#Avg. Aspect\" indicates the average number of aspects in a sentence. \"#Avg. Length of SR\", \"#Avg. Length of IR\", \"#Avg. Length of AC\", \"#Avg. Length of FD\", and \"#Avg. 
Length of AO\" correspond to the average lengths of semantic rationales (SR), impression rationales (IR), aesthetic captions for the entire image, facial descriptions, and aesthetic captions for objects." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 95, + 100, + 513, + 299 + ], + "lines": [ + { + "bbox": [ + 95, + 100, + 513, + 299 + ], + "spans": [ + { + "bbox": [ + 95, + 100, + 513, + 299 + ], + "type": "table", + "html": "
LabelTwitter-2015Twitter-2017Political Twitter
TrainDevTestTrainDevTestTrainDevTest
Positive92830331715085154933318570176
Neutral188367060716385175734697823368
Negative368149113416144168887166305
Total31791122103735621176123489021559849
#Sentence210172767417465775875105900407
#Avg. Length16.7216.7417.0516.2116.3716.3816.6216.6716.59
#Avg. Aspect1.511.541.542.042.042.101.741.732.09
#Avg. Length of SR42.542.442.542.642.843.042.742.642.2
#Avg. Length of IR56.756.055.755.556.155.455.956.156.3
#Avg. Length of AC35.935.935.532.532.531.634.034.233.3
#Avg. Length of FD39.238.537.838.938.539.339.038.438.7
#Avg. Length of AO29.129.730.328.929.428.929.129.131.3
", + "image_path": "703550cb2e9d3a9aa1f9ae05fed89527e34c05dc0ccac49b3b4ddcc077b7558f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 59, + 318, + 301, + 664 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 59, + 318, + 301, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 318, + 301, + 376 + ], + "spans": [ + { + "bbox": [ + 59, + 318, + 301, + 376 + ], + "type": "text", + "content": "- HIMT [89] is a Transformer framework that incorporates a hierarchical interaction component to model the relationships between specific aspects and the input sentence, as well as the interactions between specific aspects and object-level visual content." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 59, + 376, + 301, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 376, + 301, + 422 + ], + "spans": [ + { + "bbox": [ + 59, + 376, + 301, + 422 + ], + "type": "text", + "content": "- IMT [13] is a coarse-to-fine-grained multimodal matching network that predicts image-target relevance and performs object-target alignment to support sentiment polarity identification." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 59, + 422, + 301, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 422, + 301, + 458 + ], + "spans": [ + { + "bbox": [ + 59, + 422, + 301, + 458 + ], + "type": "text", + "content": "- CoolNet [19] is a fine-grained cross-modal alignment approach that aligns textual and visual content from both semantic and syntactic perspectives." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 59, + 458, + 301, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 458, + 301, + 515 + ], + "spans": [ + { + "bbox": [ + 59, + 458, + 301, + 515 + ], + "type": "text", + "content": "- UnifiedTMSC [90] introduces a descriptive prompt paraphrasing paradigm to generate paraphrased prompts, while optimizing image vectors within the multimodal representation space of vision and language." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 59, + 515, + 301, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 515, + 301, + 561 + ], + "spans": [ + { + "bbox": [ + 59, + 515, + 301, + 561 + ], + "type": "text", + "content": "- VEMP [91] decodes the semantic information of visual elements by utilizing textual tokens in the image, target-aware adjective-noun pairs, and image captions." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 59, + 561, + 301, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 561, + 301, + 618 + ], + "spans": [ + { + "bbox": [ + 59, + 561, + 301, + 618 + ], + "type": "text", + "content": "- Atlantis-MASC [22] is a trident-shaped, aesthetic-driven approach for joint MABSA, which integrates image aesthetic attributes and achieves effective alignment of vision and text across multiple granular levels." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 59, + 618, + 301, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 59, + 618, + 301, + 664 + ], + "spans": [ + { + "bbox": [ + 59, + 618, + 301, + 664 + ], + "type": "text", + "content": "- MDCA [24] is a generative framework proposed to provide supplementary reasoning and explicit rationales to explain why specific content conveys certain sentiment." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 45, + 685, + 129, + 696 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 685, + 129, + 696 + ], + "spans": [ + { + "bbox": [ + 45, + 685, + 129, + 696 + ], + "type": "text", + "content": "4.3 Main Results" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 44, + 700, + 300, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 700, + 300, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 700, + 300, + 748 + ], + "type": "text", + "content": "The main results are presented in Table 4. Given that the two additional rationale generation tasks contribute to improving sentiment prediction by providing explanations for the underlying causes of sentiment, we select the prediction" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 307, + 318, + 564, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 318, + 564, + 422 + ], + "spans": [ + { + "bbox": [ + 307, + 318, + 564, + 422 + ], + "type": "text", + "content": "results from sentiment classification " + }, + { + "bbox": [ + 307, + 318, + 564, + 422 + ], + "type": "inline_equation", + "content": "\\hat{y}^{\\mathrm{sc}}" + }, + { + "bbox": [ + 307, + 318, + 564, + 422 + ], + "type": "text", + "content": " as the primary outcomes for accuracy and F1 score evaluation. As presented in Table 4, the proposed method demonstrates competitive performance on both Twitter datasets compared to strong baselines from both text-only and multimodal approaches. Specifically, it achieves the highest accuracy (81.61%) and F1 score (77.98%) on the Twitter-2015 dataset, as well as the best accuracy (75.62%) and a near-optimal F1 score (74.59%) on the Twitter-2017 dataset." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 307, + 423, + 564, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 423, + 564, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 423, + 564, + 748 + ], + "type": "text", + "content": "Compared to the image-only approach (Res-Target), the proposed method achieves a remarkable improvement of over " + }, + { + "bbox": [ + 307, + 423, + 564, + 748 + ], + "type": "inline_equation", + "content": "21.73\\%" + }, + { + "bbox": [ + 307, + 423, + 564, + 748 + ], + "type": "text", + "content": " in accuracy on the Twitter-2015 dataset. Similarly, when compared to the best-performing text-only method (BERT), the proposed method demonstrates a substantial performance gain, with a " + }, + { + "bbox": [ + 307, + 423, + 564, + 748 + ], + "type": "inline_equation", + "content": "7.46\\%" + }, + { + "bbox": [ + 307, + 423, + 564, + 748 + ], + "type": "text", + "content": " increase in accuracy and a " + }, + { + "bbox": [ + 307, + 423, + 564, + 748 + ], + "type": "inline_equation", + "content": "9.12\\%" + }, + { + "bbox": [ + 307, + 423, + 564, + 748 + ], + "type": "text", + "content": " improvement in F1 on Twitter-2015. These observations underscore the limitations of single-modality approaches in capturing subtle sentiment cues from multimodal content. Moreover, the proposed method consistently outperforms recent multimodal models, such as UnifiedTMSC, Atlantis-MASC, and MDCA. For instance, UnifiedTMSC adopts a paraphrasing-based approach to enrich textual features but lacks explicit modeling of visual aesthetic-driven affective impact. On Twitter-2017, the proposed method achieves comparable F1 performance (74.59 vs. 74.70) while delivering higher accuracy (75.62 vs. 75.40), which highlights the complementary benefits of aesthetic affective resonance modeling. 
While Atlantis-MASC incorporates image aesthetics, it primarily relies on global alignment techniques, which may overlook the intricate relationships between aspects and objects. The proposed method surpasses Atlantis-MASC by " + }, + { + "bbox": [ + 307, + 423, + 564, + 748 + ], + "type": "inline_equation", + "content": "1.58\\%" + }, + { + "bbox": [ + 307, + 423, + 564, + 748 + ], + "type": "text", + "content": " in accuracy on Twitter-2017, underscoring the efficacy of its patch-token level and object-level alignment in capturing aspect-specific visual details. While MDCA incorporates reasoning and direct causality to explain sentiment causes, it primarily emphasizes textual semantic reasoning, which restricts its" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. 
XX, XXXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 558, + 26, + 564, + 34 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 83, + 73, + 525, + 362 + ], + "blocks": [ + { + "bbox": [ + 45, + 44, + 564, + 64 + ], + "lines": [ + { + "bbox": [ + 45, + 44, + 564, + 64 + ], + "spans": [ + { + "bbox": [ + 45, + 44, + 564, + 64 + ], + "type": "text", + "content": "TABLE 4 The main results " + }, + { + "bbox": [ + 45, + 44, + 564, + 64 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 45, + 44, + 564, + 64 + ], + "type": "text", + "content": " are presented with the best-performing results highlighted in bold and the second-best values indicated with underlined text." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 83, + 73, + 525, + 362 + ], + "lines": [ + { + "bbox": [ + 83, + 73, + 525, + 362 + ], + "spans": [ + { + "bbox": [ + 83, + 73, + 525, + 362 + ], + "type": "table", + "html": "
ModalityModelVenueTwitter-2015Twitter-2017Political Twitter
AccF1AccF1AccF1
Image OnlyRes-TargetCVPR 201659.8846.4858.5953.9860.2158.42
Text OnlyMemNetEMNLP 201670.1161.7664.1860.90--
MGANEMNLP 201871.1764.2164.7561.4667.3762.78
BERTNAACL 201974.1568.8668.1565.2369.4164.25
Image and TextMIMNAAAI 201971.8465.6965.8862.9970.5265.39
ESAFNTASLP 201973.3867.3767.8364.2269.2264.66
TomBERTIJCAI 201977.1571.1570.3468.0369.6562.35
JML-MASCEMNLP 202178.70-72.70-70.1468.37
EF-CapTrBERTACM MM 202178.0173.2569.7768.4269.0464.94
VLP-MABSAACL 202278.6073.8073.8071.8070.3269.64
CMMT-MASCIPM 202277.90-73.8---
FITEEMNLP 202278.4973.9070.9068.7068.6465.83
HIMTTAC 202278.1473.6871.1469.16--
IMTIJCAI 202278.2774.1972.6171.9769.9267.86
CoolNetIPM 202379.9275.2871.6469.5870.9170.25
UnifiedTMSCEMNLP 202379.8076.3075.4074.70--
VEMPEMNLP 202378.8875.0973.0172.42--
Atlantis-MASCINFFUS 202479.03-74.20-69.8368.97
MDCATNNLS 202480.7177.1573.9172.3771.3870.94
OursChimera-81.6177.9875.6274.5972.5672.32
", + "image_path": "9a52e7f0d93ba6fe3d0396f07a292739097a1fad75755af749ec8dd06624ef3c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 380, + 301, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 380, + 301, + 475 + ], + "spans": [ + { + "bbox": [ + 44, + 380, + 301, + 475 + ], + "type": "text", + "content": "ability to effectively capture detailed visual content and the corresponding aesthetic affective resonance. In contrast, the proposed method surpasses MDCA with a " + }, + { + "bbox": [ + 44, + 380, + 301, + 475 + ], + "type": "inline_equation", + "content": "0.90\\%" + }, + { + "bbox": [ + 44, + 380, + 301, + 475 + ], + "type": "text", + "content": " improvement in accuracy and a " + }, + { + "bbox": [ + 44, + 380, + 301, + 475 + ], + "type": "inline_equation", + "content": "0.83\\%" + }, + { + "bbox": [ + 44, + 380, + 301, + 475 + ], + "type": "text", + "content": " increase in F1 on the Twitter-2015 dataset. This performance gain highlights the advantages of comprehensively understanding sentiment causality from both visual-textual semantic and affective resonance perspectives." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 488, + 192, + 500 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 488, + 192, + 500 + ], + "spans": [ + { + "bbox": [ + 44, + 488, + 192, + 500 + ], + "type": "text", + "content": "4.4 Results on Political Twitter" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 503, + 300, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 503, + 300, + 585 + ], + "spans": [ + { + "bbox": [ + 44, + 503, + 300, + 585 + ], + "type": "text", + "content": "The Political Twitter dataset differs significantly from Twitter-2015 and Twitter-2017, especially due to its challenging domain shift between training, development, and test sets. 
Such domain differences create substantial barriers to generalization, which makes the task particularly suitable for advanced models that can comprehend subtle causality and context shifts." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 596, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 596, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 596, + 301, + 748 + ], + "type": "text", + "content": "From Table 4, it can be observed that the proposed Chimera demonstrates distinct advantages over existing approaches on the Political Twitter dataset. Compared to the third best performing method CoolNet, which achieved " + }, + { + "bbox": [ + 44, + 596, + 301, + 748 + ], + "type": "inline_equation", + "content": "71.32\\%" + }, + { + "bbox": [ + 44, + 596, + 301, + 748 + ], + "type": "text", + "content": " accuracy and " + }, + { + "bbox": [ + 44, + 596, + 301, + 748 + ], + "type": "inline_equation", + "content": "69.64\\%" + }, + { + "bbox": [ + 44, + 596, + 301, + 748 + ], + "type": "text", + "content": " F1 score, Chimera showcases a significant improvement. Similarly, MDCA, which performed with an accuracy of " + }, + { + "bbox": [ + 44, + 596, + 301, + 748 + ], + "type": "inline_equation", + "content": "71.38\\%" + }, + { + "bbox": [ + 44, + 596, + 301, + 748 + ], + "type": "text", + "content": " and an F1 score of " + }, + { + "bbox": [ + 44, + 596, + 301, + 748 + ], + "type": "inline_equation", + "content": "70.94\\%" + }, + { + "bbox": [ + 44, + 596, + 301, + 748 + ], + "type": "text", + "content": ", still lags behind Chimera. Additionally, we observed that the discrepancy between accuracy and F1-score significantly narrows as accuracy increases, particularly when accuracy surpasses " + }, + { + "bbox": [ + 44, + 596, + 301, + 748 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 44, + 596, + 301, + 748 + ], + "type": "text", + "content": ". 
We hypothesize that the underlying cause may lie in the relatively balanced class distribution of sentiment categories (e.g., positive, neutral," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 307, + 380, + 566, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 380, + 566, + 464 + ], + "spans": [ + { + "bbox": [ + 307, + 380, + 566, + 464 + ], + "type": "text", + "content": "negative) within the Political Twitter test set (as shown in Table 3). At higher accuracy levels, the ratios of false positives to false negatives exhibit increasing symmetry across models. This equilibrium consequently reduces the divergence between precision and recall metrics, thereby causing the F1-score - defined as their harmonic mean - to naturally converge with accuracy." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 309, + 477, + 403, + 490 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 477, + 403, + 490 + ], + "spans": [ + { + "bbox": [ + 309, + 477, + 403, + 490 + ], + "type": "text", + "content": "4.5 Ablation Study" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 307, + 492, + 564, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 492, + 564, + 561 + ], + "spans": [ + { + "bbox": [ + 307, + 492, + 564, + 561 + ], + "type": "text", + "content": "To systematically investigate the influence of the linguistic-aware semantic alignment module, including semantic and impression rationale reasoning as well as object-level fine-grained alignment, on sentiment prediction, we conducted a series of ablation studies and the results are shown in Table 5." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 562, + 565, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 562, + 565, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 562, + 565, + 748 + ], + "type": "text", + "content": "As presented in Table 5, the exclusion of semantic rationale (\"w/o SRG\") results in a noticeable performance decline across all three datasets. This effect is particularly pronounced on the Twitter-2017 and Political Twitter datasets, where nearly all evaluation metrics, including accuracy and F1 score, exhibit a reduction of approximately " + }, + { + "bbox": [ + 307, + 562, + 565, + 748 + ], + "type": "inline_equation", + "content": "2\\%" + }, + { + "bbox": [ + 307, + 562, + 565, + 748 + ], + "type": "text", + "content": ". Similarly, the absence of impression rationale reasoning (\"w/o IRG\") results in performance fluctuations on the Twitter-2015 and Political Twitter datasets. However, the most noticeable effect is observed on the Twitter-2017 dataset, where the model's performance exhibits a significant degradation, particularly in the sentiment classification task, with nearly a " + }, + { + "bbox": [ + 307, + 562, + 565, + 748 + ], + "type": "inline_equation", + "content": "4\\%" + }, + { + "bbox": [ + 307, + 562, + 565, + 748 + ], + "type": "text", + "content": " drop in both accuracy and F1 score. The results (\"w/o IRG & AC\") reveal consistent performance degradation in both Accuracy and F1-score across all three datasets. Particularly noteworthy" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 25, + 317, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 25, + 317, + 34 + ], + "spans": [ + { + "bbox": [ + 45, + 25, + 317, + 34 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. 
XX, XXXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 565, + 34 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 46, + 110, + 566, + 215 + ], + "blocks": [ + { + "bbox": [ + 50, + 44, + 560, + 99 + ], + "lines": [ + { + "bbox": [ + 50, + 44, + 560, + 99 + ], + "spans": [ + { + "bbox": [ + 50, + 44, + 560, + 99 + ], + "type": "text", + "content": "TABLE 5 The results " + }, + { + "bbox": [ + 50, + 44, + 560, + 99 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 50, + 44, + 560, + 99 + ], + "type": "text", + "content": " of the ablation study for our Chimera model are presented. The top-performing values emphasized in bold and the second-best values distinguished using underlined text. The notations \"w/o SRG,\" \"w/o IRG,\" and \"w/o SRG & IRG\" denote the exclusion of the respective generative tasks. \"w/o IRG & AC\" refers to the removal of IR generation task and replace the aesthetic caption (AC) with general caption. \"w/o LSA\" represents the removal of the Linguistic-aware Semantic Alignment branch, while \"w/o OD\" indicates the exclusion of object-level descriptions (e.g., facial descriptions and object-level aesthetic captions) from the input sequence." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 46, + 110, + 566, + 215 + ], + "lines": [ + { + "bbox": [ + 46, + 110, + 566, + 215 + ], + "spans": [ + { + "bbox": [ + 46, + 110, + 566, + 215 + ], + "type": "table", + "html": "
MethodTwitter-2015Twitter-2017Political Twitter
AccF1AccF1AccF1AccF1AccF1AccF1AccF1AccF1AccF1
SCSRGIRGSCSRGIRGSCSRGIRG
Chimera81.6177.9881.1277.1177.5673.5575.6274.5975.0973.6471.9668.2372.5672.3271.6971.4069.3068.95
w/o SRG80.5276.10--75.8370.9673.5072.49--70.6667.2070.4369.88--68.2567.58
w/o IRG80.2375.2280.0375.42--71.8870.1672.670.73--71.1570.7071.0170.52--
w/o IRG & AC80.6776.0380.1176.46--71.5969.8372.2570.33--70.6270.0671.0470.47--
w/o SRG & IRG77.2471.82----71.2368.98----67.8867.20----
w/o LSA80.5477.0379.7576.2276.5272.0373.7270.9674.3872.2671.3667.8871.8671.3770.9270.5568.4367.99
w/o OD79.9676.0880.0976.3277.1272.8473.0670.8574.3772.3671.1167.5371.6471.1271.1270.7768.5568.07
w/o Aes-cap80.0375.2779.9476.0575.6971.0872.3671.6472.2871.2169.2865.4469.4368.9469.3769.0067.8567.27
", + "image_path": "b345948b6fc756b2505bb224ee746b6e0583cff242cc6fb931a837f9fd22f931.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 45, + 224, + 218, + 327 + ], + "blocks": [ + { + "bbox": [ + 45, + 224, + 218, + 327 + ], + "lines": [ + { + "bbox": [ + 45, + 224, + 218, + 327 + ], + "spans": [ + { + "bbox": [ + 45, + 224, + 218, + 327 + ], + "type": "image", + "image_path": "4da63db8ee69d0dc75759d822365f0103e1750ba58561bdf4257661e7d41d2c8.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 225, + 224, + 391, + 327 + ], + "blocks": [ + { + "bbox": [ + 225, + 224, + 391, + 327 + ], + "lines": [ + { + "bbox": [ + 225, + 224, + 391, + 327 + ], + "spans": [ + { + "bbox": [ + 225, + 224, + 391, + 327 + ], + "type": "image", + "image_path": "08133c089ed25791aec10225277c395d31439bd5d26d91d73a8056da7ca18d6f.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 399, + 224, + 565, + 327 + ], + "blocks": [ + { + "bbox": [ + 399, + 224, + 565, + 327 + ], + "lines": [ + { + "bbox": [ + 399, + 224, + 565, + 327 + ], + "spans": [ + { + "bbox": [ + 399, + 224, + 565, + 327 + ], + "type": "image", + "image_path": "ef430b2b0457b39e9f33224cf48720981aa5772b0bdcca9226f222196ae0fd43.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 45, + 339, + 219, + 437 + ], + "blocks": [ + { + "bbox": [ + 45, + 339, + 219, + 437 + ], + "lines": [ + { + "bbox": [ + 45, + 339, + 219, + 437 + ], + "spans": [ + { + "bbox": [ + 45, + 339, + 219, + 437 + ], + "type": "image", + "image_path": "d9c00fe46e11a776cce92beb84c5862921e6256a790e3398fc8e625562168895.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 45, + 448, + 228, 
+ 459 + ], + "lines": [ + { + "bbox": [ + 45, + 448, + 228, + 459 + ], + "spans": [ + { + "bbox": [ + 45, + 448, + 228, + 459 + ], + "type": "text", + "content": "Fig. 2. Results " + }, + { + "bbox": [ + 45, + 448, + 228, + 459 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 45, + 448, + 228, + 459 + ], + "type": "text", + "content": " on hyper-parameter of " + }, + { + "bbox": [ + 45, + 448, + 228, + 459 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 45, + 448, + 228, + 459 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 45, + 448, + 228, + 459 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 45, + 448, + 228, + 459 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 225, + 338, + 391, + 436 + ], + "blocks": [ + { + "bbox": [ + 225, + 338, + 391, + 436 + ], + "lines": [ + { + "bbox": [ + 225, + 338, + 391, + 436 + ], + "spans": [ + { + "bbox": [ + 225, + 338, + 391, + 436 + ], + "type": "image", + "image_path": "be9a387bddeba4b58e166a6984c8c5b0867e9a4c5e45612fe0aa5451666328af.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 399, + 340, + 565, + 436 + ], + "blocks": [ + { + "bbox": [ + 399, + 340, + 565, + 436 + ], + "lines": [ + { + "bbox": [ + 399, + 340, + 565, + 436 + ], + "spans": [ + { + "bbox": [ + 399, + 340, + 565, + 436 + ], + "type": "image", + "image_path": "050be5835958eb54c08c32fca9f8350415aab027b5d014443ebe81b11f308c55.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 44, + 478, + 301, + 743 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 478, + 301, + 743 + ], + "spans": [ + { + "bbox": [ + 44, + 478, + 301, + 743 + ], + "type": "text", 
+ "content": "is the model's inferior performance on Twitter-2017 and Political Twitter datasets compared to the baseline(w/o IRG). However, an unexpected performance improvement emerges in Twitter-2015, surpassing even the configuration retaining aesthetic captions as input. This phenomenon may be attributed to dataset-specific characteristics in sample distribution. As detailed in Table 3, Twitter-2015 exhibits a significantly higher proportion of neutral-class samples compared to Twitter-2017 and Political Twitter. When the Chimera model is deprived of its reasoning abilities for both semantic and impression rationales (\"w/o SRG & IRG\"), its performance on sentiment classification declines to the lowest levels across all datasets. Specifically, a consistent reduction of approximately " + }, + { + "bbox": [ + 44, + 478, + 301, + 743 + ], + "type": "inline_equation", + "content": "4 - 5\\%" + }, + { + "bbox": [ + 44, + 478, + 301, + 743 + ], + "type": "text", + "content": " is observed in nearly all metrics, underscoring the essential role of rationale-based reasoning in enhancing the effectiveness and accuracy of sentiment analysis tasks. These results show that the influence of rationale reasoning differs across datasets. For Twitter-2017, with its balanced sentiment distribution (see Table 3), impression rationale has a greater impact on sentiment analysis. In contrast, both semantic and impression rationales contribute to the other two datasets, but neither is dominant." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 307, + 491, + 566, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 491, + 566, + 641 + ], + "spans": [ + { + "bbox": [ + 307, + 491, + 566, + 641 + ], + "type": "text", + "content": "The LSA branch plays a pivotal role in the Chimera model by bridging the semantic gap between textual and visual modalities, ensuring effective alignment of information across visual and textual data. 
Its removal (w/o LSA) consistently leads to a significant decline in performance across all datasets, as evident in the ablation study. For instance, on Twitter-2015, the accuracy drops from " + }, + { + "bbox": [ + 307, + 491, + 566, + 641 + ], + "type": "inline_equation", + "content": "81.61\\%" + }, + { + "bbox": [ + 307, + 491, + 566, + 641 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 307, + 491, + 566, + 641 + ], + "type": "inline_equation", + "content": "80.54\\%" + }, + { + "bbox": [ + 307, + 491, + 566, + 641 + ], + "type": "text", + "content": ", and the F1 score decreases from " + }, + { + "bbox": [ + 307, + 491, + 566, + 641 + ], + "type": "inline_equation", + "content": "77.98\\%" + }, + { + "bbox": [ + 307, + 491, + 566, + 641 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 307, + 491, + 566, + 641 + ], + "type": "inline_equation", + "content": "77.03\\%" + }, + { + "bbox": [ + 307, + 491, + 566, + 641 + ], + "type": "text", + "content": ". Similarly, for Twitter-2017, accuracy, and F1 score dropped to " + }, + { + "bbox": [ + 307, + 491, + 566, + 641 + ], + "type": "inline_equation", + "content": "73.72\\%" + }, + { + "bbox": [ + 307, + 491, + 566, + 641 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 307, + 491, + 566, + 641 + ], + "type": "inline_equation", + "content": "70.96\\%" + }, + { + "bbox": [ + 307, + 491, + 566, + 641 + ], + "type": "text", + "content": ", respectively. By aligning linguistic and visual features, the branch allows the model to effectively interpret semantic overlaps and contrasts, enabling more accurate sentiment predictions." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 643, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 643, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 643, + 566, + 748 + ], + "type": "text", + "content": "Object-level descriptions (e.g., facial expressions and object-level aesthetic captions) enrich the input sequence by providing object-level detailed visual context. The ablation study reveals that removing OD (w/o OD) causes noticeable performance drops. On Twitter-2015, accuracy drops by 1.65 percentage points, and the F1 score decreases by 1.90 percentage points. Similarly, on Twitter-2017, accuracy is reduced by 2.56 percentage points, while the F1 score drops by 3.74 percentage points. Without the OD, the model" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 563, + 34 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 44, + 42, + 301, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 42, + 301, + 205 + ], + "spans": [ + { + "bbox": [ + 44, + 42, + 301, + 205 + ], + "type": "text", + "content": "loses access to these fine-grained visual features, leading to diminished interpretability and accuracy, particularly in datasets where visual information plays a crucial role in determining sentiment. 
Additionally, the aesthetic caption is excluded from the input sequence to assess its impact on performance (w/o Aes-cap). As demonstrated in Table 5, the absence of aesthetic features results in a noteworthy decline in performance across all datasets, particularly in the impression rationale generation (IRG) task. This leads to Chimera exhibiting the poorest sentiment classification performance for IRG on the Twitter-2017 and Political Twitter datasets, which underscore the importance of aesthetic captions in guiding the model to generate coherent and emotionally nuanced impressions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 216, + 188, + 227 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 216, + 188, + 227 + ], + "spans": [ + { + "bbox": [ + 44, + 216, + 188, + 227 + ], + "type": "text", + "content": "4.6 Hyper-parameter Analysis" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "spans": [ + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "text", + "content": "We conduct a hyperparameter analysis to explore the impact of " + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "text", + "content": " on the Chimera model's performance across the Twitter-2015, Twitter-2017, and Political Twitter datasets. 
Hyperparameter " + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "text", + "content": " regulates the balance between sentiment classification (SC) and rationale generation components (semantic and impression rationales, SRG, and IRG), while " + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "text", + "content": " controls the weight of patch-token alignment within the overall loss function. As shown in Figure 2, for all datasets, a lower " + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "text", + "content": ", which assigns greater weight to rationale generation, generally improves model performance, with values around 0.1 to 0.2 achieving the highest accuracy and F1 scores. This emphasizes the significance of integrating semantic and impression rationales in MASC. As " + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "text", + "content": " increases, favoring SC loss, performance plateaus or declines, particularly for the Political Twitter dataset, indicating that reduced emphasis on rationale generation diminishes the model's ability to capture fine-grained sentiment context effectively. Moreover, the results indicate that increasing " + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "text", + "content": " initially enhances model performance, with diminishing returns beyond a certain threshold. 
For the Twitter-2015 and Political Twitter datasets, moderate " + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "text", + "content": " values [0.2, 0.5] achieve optimal accuracy and F1 scores, while higher values (" + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "inline_equation", + "content": "\\lambda > 0.6" + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "text", + "content": ") lead to performance stabilization or slight decline. This observation indicates that balanced alignment between visual and textual features enhances the model's interpretability and accuracy and excessively high " + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "inline_equation", + "content": "\\lambda" + }, + { + "bbox": [ + 44, + 230, + 301, + 578 + ], + "type": "text", + "content": " values may negatively impact performance, likely due to overemphasis on alignment at the expense of core sentiment classification. For Twitter-2017, a similar trend is observed, although performance variations are less pronounced." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 45, + 590, + 165, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 590, + 165, + 601 + ], + "spans": [ + { + "bbox": [ + 45, + 590, + 165, + 601 + ], + "type": "text", + "content": "5 IN-DEPTH ANALYSIS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 605, + 202, + 616 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 605, + 202, + 616 + ], + "spans": [ + { + "bbox": [ + 44, + 605, + 202, + 616 + ], + "type": "text", + "content": "5.1 Quality Analysis of Rationale" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 619, + 300, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 619, + 300, + 746 + ], + "spans": [ + { + "bbox": [ + 44, + 619, + 300, + 746 + ], + "type": "text", + "content": "Table 6 provides an evaluation of the sentiment rationale quality for both the ground-truth and Chimera-generated content, aiming to analyze their impact on sentiment analysis. A pre-trained sentiment classification model [92] is employed to assess the intuitive sentiment quality of these rationales across three test datasets by inputting the rationales into the model and analyzing the sentiment predictions. For both SR and IR, the results in the GroundTruth row represent the upper performance bound. 
It is evident that the ground truth performance for SR significantly exceeds that of IR, indicating that semantic rationales" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 310, + 82, + 563, + 196 + ], + "blocks": [ + { + "bbox": [ + 318, + 43, + 556, + 72 + ], + "lines": [ + { + "bbox": [ + 318, + 43, + 556, + 72 + ], + "spans": [ + { + "bbox": [ + 318, + 43, + 556, + 72 + ], + "type": "text", + "content": "TABLE 6 The evaluation results " + }, + { + "bbox": [ + 318, + 43, + 556, + 72 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 318, + 43, + 556, + 72 + ], + "type": "text", + "content": " of rationale quality. The best-performing results highlighted in bold." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 82, + 563, + 196 + ], + "lines": [ + { + "bbox": [ + 310, + 82, + 563, + 196 + ], + "spans": [ + { + "bbox": [ + 310, + 82, + 563, + 196 + ], + "type": "table", + "html": "
Rationale SourceTwitter-2015Twitter-2017Political
AccF1AccF1AccF1
Semantic Rationale
Ground-Truth99.0499.0498.5498.5497.6497.64
Chimera80.9180.8375.0474.9370.2070.14
Impression Rationale
Ground-Truth69.9169.9072.7772.7176.876.87
Chimera63.4563.6561.6759.3860.5460.12
", + "image_path": "393ca141cdf3ad35aa86d194b22a9471891c0a558d4751aa6909446c174e5b0a.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 310, + 209, + 563, + 332 + ], + "blocks": [ + { + "bbox": [ + 310, + 209, + 563, + 332 + ], + "lines": [ + { + "bbox": [ + 310, + 209, + 563, + 332 + ], + "spans": [ + { + "bbox": [ + 310, + 209, + 563, + 332 + ], + "type": "image", + "image_path": "f035b5be188bc560f8e6ccdde0472213968c026b15af35b35081f552ab580ea2.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 309, + 343, + 563, + 354 + ], + "lines": [ + { + "bbox": [ + 309, + 343, + 563, + 354 + ], + "spans": [ + { + "bbox": [ + 309, + 343, + 563, + 354 + ], + "type": "text", + "content": "Fig. 3. Human evaluation of factuality, clarity and fluency for SR and IR." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 376, + 564, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 376, + 564, + 548 + ], + "spans": [ + { + "bbox": [ + 307, + 376, + 564, + 548 + ], + "type": "text", + "content": "are more critical for this task than impression rationales. We hypothesize that two factors contribute to this discrepancy. Firstly, as illustrated in Table 3, semantic rationales are shorter in length and straightforward, facilitating easy comprehension, while the emotions elicited by images are inherently more abstract and multifaceted. Secondly, the IR's reliance on visual cues contrasts sharply with the Twitter dataset's text-centric sentiment distribution. 
Prior research has shown that a considerable majority of targets (around " + }, + { + "bbox": [ + 307, + 376, + 564, + 548 + ], + "type": "inline_equation", + "content": "58\\%" + }, + { + "bbox": [ + 307, + 376, + 564, + 548 + ], + "type": "text", + "content": ") are absent from images [13], and most targets (93% in Twitter-2015) exhibit emotional coherence with their textual counterparts [93]. This misalignment underscores the dataset's limitations in evaluating IRs and necessitates a nuanced understanding of the interplay between visual and textual sentiment representations." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 307, + 550, + 564, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 550, + 564, + 723 + ], + "spans": [ + { + "bbox": [ + 307, + 550, + 564, + 723 + ], + "type": "text", + "content": "A total of 180 samples were randomly selected for human evaluation, with 100 samples drawn from the training set, 40 from the testing set, and 40 from the validation set of both the Twitter-2015 and Twitter-2017 datasets. Four native English speakers with Master's degrees in the arts were recruited to assess the quality of the rationale data based on three criteria: (1) factuality, evaluating whether the rationale is grounded in accurate and verifiable information; (2) clarity, assessing the logical structure and comprehensibility of the rationale; and (3) fluency, measuring the grammatical accuracy and smoothness of the language used. 
The Fleiss' Kappa " + }, + { + "bbox": [ + 307, + 550, + 564, + 723 + ], + "type": "inline_equation", + "content": "(\\kappa)" + }, + { + "bbox": [ + 307, + 550, + 564, + 723 + ], + "type": "text", + "content": " values for the initial evaluation across the four raters were as follows: factuality " + }, + { + "bbox": [ + 307, + 550, + 564, + 723 + ], + "type": "inline_equation", + "content": "\\kappa = 0.922" + }, + { + "bbox": [ + 307, + 550, + 564, + 723 + ], + "type": "text", + "content": ", clarity " + }, + { + "bbox": [ + 307, + 550, + 564, + 723 + ], + "type": "inline_equation", + "content": "\\kappa = 0.945" + }, + { + "bbox": [ + 307, + 550, + 564, + 723 + ], + "type": "text", + "content": ", and fluency " + }, + { + "bbox": [ + 307, + 550, + 564, + 723 + ], + "type": "inline_equation", + "content": "\\kappa = 0.960" + }, + { + "bbox": [ + 307, + 550, + 564, + 723 + ], + "type": "text", + "content": ". In cases of disagreement, the evaluators engaged in discussions to reach a consensus." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 308, + 723, + 565, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 723, + 565, + 747 + ], + "spans": [ + { + "bbox": [ + 308, + 723, + 565, + 747 + ], + "type": "text", + "content": "Figure 3 presents the results of the human evaluation. It can be observed that SR consistently exhibits higher quality" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. 
XX, XXXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 44, + 41, + 566, + 233 + ], + "blocks": [ + { + "bbox": [ + 44, + 41, + 566, + 233 + ], + "lines": [ + { + "bbox": [ + 44, + 41, + 566, + 233 + ], + "spans": [ + { + "bbox": [ + 44, + 41, + 566, + 233 + ], + "type": "image", + "image_path": "d0fccd6c9c3806a0763a2979525d5a81dbcd338f8180464516628387f00a7ac6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 44, + 242, + 451, + 254 + ], + "lines": [ + { + "bbox": [ + 44, + 242, + 451, + 254 + ], + "spans": [ + { + "bbox": [ + 44, + 242, + 451, + 254 + ], + "type": "text", + "content": "Fig. 4. Assessment of sentiment intensity for SR and IR in both ground truth data and Chimera-generated content." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 272, + 302, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 272, + 302, + 389 + ], + "spans": [ + { + "bbox": [ + 44, + 272, + 302, + 389 + ], + "type": "text", + "content": "across all metrics, which verifies that the employed LLM is capable of generating appropriate rationale data for specific tasks when provided with concrete ground-truth labels. In comparison to SR, IR demands a more in-depth understanding of visual content and is inherently more subjective. 
Consequently, IR is more prone to issues of factuality and clarity, as interpreting the abstract aesthetic and emotional elements conveyed by an image often involves subjective reasoning, which may lead to misalignment with objective ground truths or human expectations." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 44, + 397, + 226, + 409 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 397, + 226, + 409 + ], + "spans": [ + { + "bbox": [ + 44, + 397, + 226, + 409 + ], + "type": "text", + "content": "5.2 Quantitative Analysis of Rationale" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 44, + 411, + 300, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 411, + 300, + 539 + ], + "spans": [ + { + "bbox": [ + 44, + 411, + 300, + 539 + ], + "type": "text", + "content": "We conduct a quantitative analysis on the test sets of ground truth and Chimera-generated content to examine the impact of varying levels of sentiment intensity in cognitive rationales on the accuracy of sentiment prediction, including their potential to amplify or diminish predictive performance. As illustrated in Figure 4, the sentiment intensity distributions of Twitter-2015 and Twitter-2017 reveal distinct patterns. Specifically, the sentiment intensity of IR demonstrates a noticeable bias toward positive values, whereas the sentiment intensity of SR aligns more closely with the sentiment polarity label distribution presented in Table 3." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 44, + 539, + 301, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 539, + 301, + 748 + ], + "spans": [ + { + "bbox": [ + 44, + 539, + 301, + 748 + ], + "type": "text", + "content": "This observation suggests that IR demonstrates a bias toward positive samples, increasing the model's confidence in predicting positive instances. 
While this bias may be beneficial for datasets with a higher proportion of positive samples (e.g., Twitter-2017), it could lead to additional bias in datasets with a limited representation of positive samples. This finding is further corroborated by the ablation study results, which reveal that the performance of the Chimera model without IR is worse on Twitter-2017 compared to its performance on Twitter-2015. Another notable observation is that, for the ground truth of the Political Twitter dataset, the sentiment intensity distribution of IR is relatively uniform across all ranges. In contrast, the Chimera-generated content for IR exhibits a more distinguishable sentiment intensity distribution compared to the ground truth, which further validates the quality of SR, the effectiveness of the proposed Chimera training paradigm, and the robustness of Chimera's performance." + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 310, + 272, + 566, + 415 + ], + "blocks": [ + { + "bbox": [ + 310, + 272, + 566, + 415 + ], + "lines": [ + { + "bbox": [ + 310, + 272, + 566, + 415 + ], + "spans": [ + { + "bbox": [ + 310, + 272, + 566, + 415 + ], + "type": "image", + "image_path": "3acadf58ed5ee68a3128615d89f084eeca07b2f9bf82d1cd486f6dc56fa12528.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 308, + 426, + 565, + 447 + ], + "lines": [ + { + "bbox": [ + 308, + 426, + 565, + 447 + ], + "spans": [ + { + "bbox": [ + 308, + 426, + 565, + 447 + ], + "type": "text", + "content": "Fig. 5. Visualization of the top 15 most frequent aesthetic-related words in generated IR." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 308, + 465, + 534, + 478 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 465, + 534, + 478 + ], + "spans": [ + { + "bbox": [ + 308, + 465, + 534, + 478 + ], + "type": "text", + "content": "5.3 Impact of Aesthetic Attributes on Sentiment" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 307, + 480, + 566, + 748 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 480, + 566, + 748 + ], + "spans": [ + { + "bbox": [ + 307, + 480, + 566, + 748 + ], + "type": "text", + "content": "To investigate the impact of image aesthetic attributes on sentiment analysis, we visualize the frequency of aesthetic-related words within the impression rationales generated by our proposed Chimera model and its variant \"Chimera w/o Aes-cap\" on the Twitter-2015 and Twitter-2017 test sets. Specifically, we visualize the top 15 most frequent aesthetic-related words within the generated IR, based on the aesthetic attributes defined by Milena et al. [94]. As shown in Figure 5, the frequency analysis of aesthetic-related words for Chimera on Twitter-2015 and Twitter-2017 reveals that \"visual,\" \"vibrant,\" \"focus,\" and \"design\" prominently appear across both datasets. These terms, associated with visual clarity, expressive quality, image composition, and cohesiveness, align with the model's improved accuracy and F1 scores. However, excluding the aesthetic caption from the input results in subtle shifts in the frequency distribution of these aesthetic-related terms. For Twitter-2015, the overall frequency distribution of aesthetic-related terms shows minimal change, with a slight increase in \"focus\" and a decrease in \"vibrant\". In contrast, for Twitter-2017, \"focus\" shows a significant increase, while \"visual\" and \"vibrant\" decrease notably. 
Combined with the ablation study results in Table 5, the absence of aesthetic captions in the input" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 59, + 43, + 552, + 550 + ], + "blocks": [ + { + "bbox": [ + 59, + 43, + 552, + 550 + ], + "lines": [ + { + "bbox": [ + 59, + 43, + 552, + 550 + ], + "spans": [ + { + "bbox": [ + 59, + 43, + 552, + 550 + ], + "type": "table", + "html": "
Input image-text pair & Auxiliary sentencesMethodSemantic Rationale (SR of GT, Chimera and ChatGPT-4o) Reasoning Cause(RC of MDCA)Impression Rationale (IR of GT and Chimera)/ Direct Cause(DC of MDCA)Sentiment
RT @ MamiHerald : [Joanne Stiger]POS and Melissa Keller are the first same - sex couple to be married in Broward :Ground Truth (GT)Based on the image-text pair, the sentiment towards Joanne Stiger in the given text and image is positive, because the text highlights a historic and celebratory event of her marriage, and the image shows people smiling and appearing joyful, indicating happiness and approval.The image emanates a joyous and celebratory atmosphere, highlighted by the smiling faces and relaxed postures of the individuals present. The presence of photographers and the gathering of people around the couple enhance the sense of an important and positive milestone being reached. Overall, the visual conveys happiness, love, and a significant moment being warmly embraced by all involved.Positive
ChimeraThe sentiment towards Joanne Stiger in the given text and image is positive, because the text highlights her as the first same-sex couple to be married in Broward, and the image shows her smiling and looking happy, which conveys a sense of pride and joy.The image depicts two individuals who appear relaxed and happy, with one person appearing content and relaxed. The overall impression is one of happiness and contentment. The setting and body language suggest a sense of happiness, together with a pleasant and celebratory atmosphere.Positive
Aesthetic caption: The body language of the two people shows that they are in a very different way. The background shows a room with a lot of people and the cameraman is in the background.MDCABecause the tweet simply reports on a factual event without expressing any emotional response towards Joanne Stiger. The use of the word "first" suggests that the author is excited about the news and is highlighting the fact that Stiger and Keller are the first same-sex couple to be married in Broward. Overall, the tweet does not provide enough information to determine a positive or negative sentiment towards JoAnn Stiger herself.Because joanne stiger and Melissa keller are the first same - sex couple to be married in bronward.Neutral
GPT-4oBased on the image-text pair, the sentiment towards Joanne Stiger in the given text and image is Positive, because both the faces of Joanne Stiger and Melissa Keller, as well as the individuals around them, display expressions of happiness and joy, indicating a celebratory and momentous occasion.Positive
Primary Football Cup . [St Bede]' s v St Augustines friendly . . . both out but now for the friendly # sportsmanshipGround TruthBased on the image-text pair, the sentiment towards St Bede in the given text and image is neutral, because the text describes the event as a friendly match with a focus on sportsmanship, and the image shows a regular football game without any emotional or evaluative expressions towards St Bede.The image depicts a friendly football match between St Bede's and St Augustine's on a lush green field under a partly cloudy sky. The overall scene conveys a sense of camaraderie and sportsmanship, aligning well with the neutral sentiment label. The focus appears to be on enjoying the game and fostering friendly competition in a relaxed atmosphere.Neutral
ChimeraThe sentiment towards St Bede in the given text and image is neutral, because the text simply states that St Bede is participating in a friendly match without expressing any positive or negative emotions, and the image shows a standard football match scene without any clear emotional cues.The image of the St Bede vs. St Augustines football match conveys a sense of camaraderie and sportsmanship. The players are engaged in a moment of play, suggesting a positive and supportive atmosphere. The neutral sentiment label aligns with the overall feeling of cambered athleticism and teamwork, without evoking strong emotions.Neutral
Aesthetic caption: The position of the camera captures the players in action, as they are engaged in a game of soccer. the position of the camera captures the players in action, and the position of the camera captures the players in action.MDCABecause the writer is promoting the team's friendly football match with St. Albers, which suggests that they are a fan of the team. The use of the hashtag #sportsmanship also suggests that the writer has a positive opinion of St. Bede. Overall, the language and tone of the tweet suggest that the sentiment towards St. Bede is positive.It is a friendly football cup.Positive
GPT-4oBased on the image-text pair, the sentiment towards St Bede in the given text and image is Positive, because the text highlights a friendly and sportsmanlike attitude despite both teams being out of the competition, and the image shows players engaging in the match, fostering a positive environment.Positive
[Michael Oer]NEG is latest reminder that NFL pain lasts 24 / 7 / 365Ground TruthBased on the image-text pair, the sentiment towards Michael Oer in the given text and image is negative, because the text highlights the constant and enduring pain associated with his NFL career, implying a prolonged and difficult experience.The image of Michael Oer in his NFL uniform, combined with the sentiment label "negative", conveys a sense of enduring struggle and fatigue. His posture and facial expression suggest a feeling of exhaustion or frustration, which aligns with the text's implication that the pain and challenges of an NFL career are relentless and unforgiving. The overall impression is one of continuous physical and emotional burden.Negative
ChimeraThe sentiment towards Michael Oer in the given text and image is negative, because the text highlights that his recent reminder that NFL pain lasts 24/7/365, which is a serious and distressing reminder.The image of Michael Oer exudes a sense of determination and focus, aligning with the negative sentiment label. His serious expression and the context of NFL pain underscore a feeling of exhaustion and exhaustion, which complements the idea of prolonged pain and the accompanying reminder that "no matter how much you love it," evoking a negative feeling.Negative
MDCABecause the tweet does not express any clear positive or negative opinion towards him. The tweet simply states that he is a reminder that NFL pain lasts for a specific time period. There is no indication of any emotional attachment or opinion towards Michael Oer in the tweet, and the language used is informative rather than emotional. Therefore, the sentiment towards Michael Oer in this tweet is neutral.He is reminder that NFL pain lasts 24 / 7 / 365.Neutral
GPT-4oBased on the image-text pair, the sentiment towards Michael Oer in the given text and image is Negative, because the text highlights the unending pain associated with NFL careers, using Michael Oer as an example, and alludes to the physical and possibly emotional toll he endures.Negative
", + "image_path": "76169821fecec912e1e9076d4013b612fbcbf9e394366e9d11fb88f543258d70.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 44, + 562, + 565, + 591 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 562, + 565, + 591 + ], + "spans": [ + { + "bbox": [ + 44, + 562, + 565, + 591 + ], + "type": "text", + "content": "Fig. 6. Three examples showcasing the predictions generated by Chimera, MDCA, and GPT-4o are presented for analysis. During the evaluation process, GPT-4o exclusively produces the semantic rationale (SR). The input image-text pair and auxiliary sentences are utilized solely by Chimera. For MDCA, the reasoning cause (RC), direct cause (DC), and sentiment prediction are derived through direct inference." + } + ] + } + ], + "index": 3, + "type": "text" + }, + { + "bbox": [ + 44, + 610, + 301, + 738 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 44, + 610, + 301, + 738 + ], + "spans": [ + { + "bbox": [ + 44, + 610, + 301, + 738 + ], + "type": "text", + "content": "leads to the worst sentiment analysis performance across all datasets on IRG. This highlights the critical role of aesthetic captions in enhancing the model's understanding of image aesthetics, particularly in datasets like Twitter-2017 with balanced sentiment distributions. Specifically, attributes such as \"visual\" and \"vibrant\" positively contribute to sentiment analysis performance, whereas \"focus\" appears to significantly impair it. We speculate that since \"focus\" emphasizes specific image elements, potentially leads to an unbalanced interpretation of visual content. 
This localized emphasis can narrow the model's analytical scope, prioritizing details at" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 308, + 610, + 564, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 610, + 564, + 657 + ], + "spans": [ + { + "bbox": [ + 308, + 610, + 564, + 657 + ], + "type": "text", + "content": "the expense of broader context and compositional harmony. Consequently, the model may struggle to capture holistic aesthetic and emotional cues essential for accurate sentiment classification." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 309, + 673, + 524, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 673, + 524, + 685 + ], + "spans": [ + { + "bbox": [ + 309, + 673, + 524, + 685 + ], + "type": "text", + "content": "5.4 Comparison with Large Language Models" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 308, + 689, + 565, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 689, + 565, + 746 + ], + "spans": [ + { + "bbox": [ + 308, + 689, + 565, + 746 + ], + "type": "text", + "content": "We evaluate the performance of GPT-4o on the MASC task under a zero-shot setting. 
As shown in Table 7, GPT-4o achieves an accuracy of " + }, + { + "bbox": [ + 308, + 689, + 565, + 746 + ], + "type": "inline_equation", + "content": "46.87\\%" + }, + { + "bbox": [ + 308, + 689, + 565, + 746 + ], + "type": "text", + "content": " and an F1 score of " + }, + { + "bbox": [ + 308, + 689, + 565, + 746 + ], + "type": "inline_equation", + "content": "47.47\\%" + }, + { + "bbox": [ + 308, + 689, + 565, + 746 + ], + "type": "text", + "content": ", which is substantially lower than Chimera, which reports " + }, + { + "bbox": [ + 308, + 689, + 565, + 746 + ], + "type": "inline_equation", + "content": "81.61\\%" + }, + { + "bbox": [ + 308, + 689, + 565, + 746 + ], + "type": "text", + "content": " accuracy and " + }, + { + "bbox": [ + 308, + 689, + 565, + 746 + ], + "type": "inline_equation", + "content": "77.98\\%" + }, + { + "bbox": [ + 308, + 689, + 565, + 746 + ], + "type": "text", + "content": " F1 score. On the Twitter-2017" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. 
XX, XXXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 47, + 107, + 298, + 182 + ], + "blocks": [ + { + "bbox": [ + 47, + 44, + 298, + 98 + ], + "lines": [ + { + "bbox": [ + 47, + 44, + 298, + 98 + ], + "spans": [ + { + "bbox": [ + 47, + 44, + 298, + 98 + ], + "type": "text", + "content": "TABLE 7 The experimental results " + }, + { + "bbox": [ + 47, + 44, + 298, + 98 + ], + "type": "inline_equation", + "content": "(\\%)" + }, + { + "bbox": [ + 47, + 44, + 298, + 98 + ], + "type": "text", + "content": " of GPT-4o on the MASC task under a zero-shot setting are presented. The best-performing results highlighted in bold. The term \"dis\" refers to the percentage of samples where the sentiment polarity associated with a specific aspect cannot be discerned." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 47, + 107, + 298, + 182 + ], + "lines": [ + { + "bbox": [ + 47, + 107, + 298, + 182 + ], + "spans": [ + { + "bbox": [ + 47, + 107, + 298, + 182 + ], + "type": "table", + "html": "
MethodTwitter-2015Twitter-2017
AccF1DisAccF1Dis
Chimera81.6177.98-75.6274.59-
GPT-4o46.8747.470.256.0853.280.5
GPT-4o w/o image67.0262.38-59.6460.35-
", + "image_path": "f8457c3112501699ff84b3cfc411209dbad50c14d2e6528c484b1697dae5c507.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 217, + 299, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 217, + 299, + 493 + ], + "spans": [ + { + "bbox": [ + 47, + 217, + 299, + 493 + ], + "type": "text", + "content": "dataset, GPT-4o shows an improvement with an accuracy of " + }, + { + "bbox": [ + 47, + 217, + 299, + 493 + ], + "type": "inline_equation", + "content": "56.08\\%" + }, + { + "bbox": [ + 47, + 217, + 299, + 493 + ], + "type": "text", + "content": " and an F1 score of " + }, + { + "bbox": [ + 47, + 217, + 299, + 493 + ], + "type": "inline_equation", + "content": "53.28\\%" + }, + { + "bbox": [ + 47, + 217, + 299, + 493 + ], + "type": "text", + "content": ". However, this performance still trails behind Chimera, which reports " + }, + { + "bbox": [ + 47, + 217, + 299, + 493 + ], + "type": "inline_equation", + "content": "75.62\\%" + }, + { + "bbox": [ + 47, + 217, + 299, + 493 + ], + "type": "text", + "content": " accuracy and " + }, + { + "bbox": [ + 47, + 217, + 299, + 493 + ], + "type": "inline_equation", + "content": "74.59\\%" + }, + { + "bbox": [ + 47, + 217, + 299, + 493 + ], + "type": "text", + "content": " F1 score. Surprisingly, removing the image input results in an improvement in the model's accuracy and F1 score, reaching " + }, + { + "bbox": [ + 47, + 217, + 299, + 493 + ], + "type": "inline_equation", + "content": "67.02\\%" + }, + { + "bbox": [ + 47, + 217, + 299, + 493 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 47, + 217, + 299, + 493 + ], + "type": "inline_equation", + "content": "62.38\\%" + }, + { + "bbox": [ + 47, + 217, + 299, + 493 + ], + "type": "text", + "content": " on the Twitter-2015 dataset, respectively. This observation contrasts sharply with the phenomenon observed in the baseline model. 
Similarly, in the Twitter-2017 dataset, the performance of GPT-4o without image input is slightly better than with the image input. We speculate that in task-specific models, incorporating image data typically improves sentiment classification performance, as these models are finetuned to leverage multi-modal inputs effectively. However, in a zero-shot setting, GPT-4o operates based on its general pre-trained knowledge, which may not be fully optimized for combining textual and visual inputs for sentiment analysis. In this setting, adding image input may introduce noise rather than meaningful information. Moreover, GPT-4o has a low Dis value on both datasets, which slightly decreases to 0 when the image input is removed. This further suggests that the model's ability to distinguish sentiment polarity is, to a certain extent, influenced by the inclusion of the visual modality." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 511, + 122, + 523 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 511, + 122, + 523 + ], + "spans": [ + { + "bbox": [ + 47, + 511, + 122, + 523 + ], + "type": "text", + "content": "5.5 Case Study" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 527, + 299, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 527, + 299, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 527, + 299, + 746 + ], + "type": "text", + "content": "An additional case study is performed to provide a more comprehensive evaluation of the effectiveness of the proposed Chimera model. Figure 6 illustrates three representative examples, each corresponding to positive, neutral, and negative samples, respectively. As illustrated in the first example, MDCA is the sole model to predict \"Neutral\" for the target \"Joanne Stiger,\" whereas the other three models accurately predict \"Positive\". 
This result is primarily due to the RC and DC generated by MDCA, which lack the expression of positive or negative sentiment. Notably, the RC predominantly emphasizes the textual content, overlooking the joyful atmosphere conveyed through the image. In the second example, an intriguing observation is that the situation is the exact opposite of the previous case. Here, only Chimera correctly predicts the sentiment polarity of the specific target, \"St. Bede\" as \"Neutral\" whereas both GPT-4o and MDCA incorrectly classify it as \"Positive\". It is observed that the SR of GPT-4o and the RC of MDCA both convey a positive sentiment, largely due" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 310, + 43, + 563, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 43, + 563, + 227 + ], + "spans": [ + { + "bbox": [ + 310, + 43, + 563, + 227 + ], + "type": "text", + "content": "to an overinterpretation and extrapolation of the textual content. In contrast, Chimera demonstrates accurate prediction by appropriately integrating a balanced understanding of the image content and its aesthetic attributes. In the final example, both Chimera and GPT-4o accurately identify the sentiment polarity of \"Michael Oher\" as \"Negative\". MDCA's incorrect prediction of \"Neutral\" may be attributed to its generated RC and DC failing to account for the individual's expression, thereby overlooking critical semantic cues present in the visual content. With the aid of facial descriptions, Chimera effectively captures and aligns fine-grained emotional cues from visual content, enabling it to generate coherent SR and IR and achieve accurate predictions. The above representative instances further verify that incorporating cognitive and aesthetic sentiment causality enhances sentiment classification accuracy in MABSA." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 311, + 247, + 394, + 258 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 247, + 394, + 258 + ], + "spans": [ + { + "bbox": [ + 311, + 247, + 394, + 258 + ], + "type": "text", + "content": "6 CONCLUSION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 310, + 264, + 563, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 264, + 563, + 483 + ], + "spans": [ + { + "bbox": [ + 310, + 264, + 563, + 483 + ], + "type": "text", + "content": "In this paper, we propose a cognitive sentiment causality understanding framework tailored for multimodal aspect-based sentiment classification. The framework, which is novel in its approach, consists of four primary components: linguistic-aware semantic alignment, a translation module, rationale dataset construction, and rationale-aware learning. The linguistic-aware semantic alignment component facilitates visual patch-token level alignment through dynamic patch selection and semantic patch calibration. The translation module transforms holistic image and object-level visual information into corresponding emotion-laden textual representations. The rationale dataset construction involves designing refined prompts and leveraging LLMs to generate semantic and impression rationale. Finally, rationale-aware learning incorporates semantic explanations and affective-cognitive resonance to enhance the model's capacity to understand cognitive sentiment causality. Experimental results on three Twitter datasets demonstrate that the proposed Chimera achieves performance gains over SOTA baselines." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 311, + 503, + 416, + 514 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 503, + 416, + 514 + ], + "spans": [ + { + "bbox": [ + 311, + 503, + 416, + 514 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 310, + 521, + 563, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 521, + 563, + 670 + ], + "spans": [ + { + "bbox": [ + 310, + 521, + 563, + 670 + ], + "type": "text", + "content": "This research is supported by the Shanghai Science and Technology Innovation Action Plan (No. 24YF2710100), the Shanghai Special Project to Promote High-quality Industrial Development (No. RZ-CYAI-01-24-0288), the National Nature Science Foundation of China (No. 62477010), the Science and Technology Commission of Shanghai Municipality Grant (No. 22511105901, No. 21511100402), the Ministry of Education, Singapore under its MOE Academic Research Fund Tier 2 (STEM RIE2025 Award MOE-T2EP20123-0005) and by the RIE2025 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) (Award I2301E0026), administered by A\\*STAR, as well as supported by Alibaba Group and NTU Singapore." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 311, + 690, + 378, + 701 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 690, + 378, + 701 + ], + "spans": [ + { + "bbox": [ + 311, + 690, + 378, + 701 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 311, + 709, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 709, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 311, + 709, + 564, + 746 + ], + "type": "text", + "content": "[1] R. Mao, Q. Liu, K. He, W. Li, and E. 
Cambria, \"The biases of pretrained language models: An empirical study on prompt-based sentiment analysis and emotion detection,\" IEEE Transactions on Affective Computing, vol. 14, no. 3, pp. 1743-1753, 2023." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 47, + 26, + 317, + 34 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 26, + 317, + 34 + ], + "spans": [ + { + "bbox": [ + 47, + 26, + 317, + 34 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 43, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 47, + 43, + 301, + 71 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 43, + 301, + 71 + ], + "spans": [ + { + "bbox": [ + 47, + 43, + 301, + 71 + ], + "type": "text", + "content": "[2] K. Du, F. Xing, R. Mao, and E. Cambria, \"Financial sentiment analysis: Techniques and applications,\" ACM Computing Surveys, vol. 56, no. 9, pp. 1-42, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 72, + 301, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 72, + 301, + 99 + ], + "spans": [ + { + "bbox": [ + 47, + 72, + 301, + 99 + ], + "type": "text", + "content": "[3] R. Mao, M. Ge, S. Han, W. Li, K. He, L. Zhu, and E. Cambria, \"A survey on pragmatic processing techniques,\" Information Fusion, vol. 114, p. 102712, 2025." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 99, + 301, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 99, + 301, + 135 + ], + "spans": [ + { + "bbox": [ + 47, + 99, + 301, + 135 + ], + "type": "text", + "content": "[4] L. Xiao, Y. Xue, H. Wang, X. Hu, D. Gu, and Y. Zhu, \"Exploring fine-grained syntactic information for aspect-based sentiment classification with dual graph neural networks,\" Neurocomputing, vol. 471, pp. 48-59, 2022." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 135, + 301, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 135, + 301, + 162 + ], + "spans": [ + { + "bbox": [ + 47, + 135, + 301, + 162 + ], + "type": "text", + "content": "[5] Y. Ma, R. Mao, Q. Lin, P. Wu, and E. Cambria, \"Quantitative stock portfolio optimization by multi-task learning risk and return,\" Information Fusion, vol. 104, p. 102165, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 163, + 301, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 163, + 301, + 198 + ], + "spans": [ + { + "bbox": [ + 47, + 163, + 301, + 198 + ], + "type": "text", + "content": "[6] K. Du, F. Xing, R. Mao, and E. Cambria, \"FinSenticNet: A concept-level lexicon for financial sentiment analysis,\" in 2023 IEEE Symposium Series on Computational Intelligence (SSCI). IEEE, 2023, pp. 109-114." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 199, + 301, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 199, + 301, + 236 + ], + "spans": [ + { + "bbox": [ + 47, + 199, + 301, + 236 + ], + "type": "text", + "content": "[7] X. Zhang, R. Mao, and E. Cambria, \"SenticVec: Toward robust and human-centric neurosymbolic sentiment analysis,\" in Findings of the Association for Computational Linguistics: ACL. Association for Computational Linguistics, 2024, pp. 4851-4863." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 236, + 301, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 236, + 301, + 272 + ], + "spans": [ + { + "bbox": [ + 47, + 236, + 301, + 272 + ], + "type": "text", + "content": "[8] S. Zhao, M. Jia, L. A. Tuan, F. Pan, and J. Wen, \"Universal vulnerabilities in large language models: Backdoor attacks for incontext learning,\" in Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, 2024, pp. 11507-11522." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 272, + 301, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 272, + 301, + 308 + ], + "spans": [ + { + "bbox": [ + 47, + 272, + 301, + 308 + ], + "type": "text", + "content": "[9] L. Zhu, R. Mao, E. Cambria, and B. J. Jansen, \"Neurosymbolic AI for personalized sentiment analysis,\" in Proceedings of International Conference on Human-Computer Interaction (HCII), 2024, pp. 269-290." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 308, + 301, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 308, + 301, + 345 + ], + "spans": [ + { + "bbox": [ + 47, + 308, + 301, + 345 + ], + "type": "text", + "content": "[10] S. Zhao, J. Wen, A. Luu, J. Zhao, and J. Fu, \"Prompt as triggers for backdoor attack: Examining the vulnerability in language models,\" in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, 2023, pp. 12303-12317." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 345, + 301, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 345, + 301, + 380 + ], + "spans": [ + { + "bbox": [ + 47, + 345, + 301, + 380 + ], + "type": "text", + "content": "[11] J. YU and J. 
JIANG, \"Adapting bert for target-oriented multimodal sentiment classification.(2019),\" in Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, 2019, pp. 5408-5414." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 381, + 301, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 381, + 301, + 418 + ], + "spans": [ + { + "bbox": [ + 47, + 381, + 301, + 418 + ], + "type": "text", + "content": "[12] J. Yu, J. Jiang, and R. Xia, \"Entity-sensitive attention and fusion network for entity-level multimodal sentiment classification,\" IEEE/ACM Transactions on Audio, Speech, and Language Processing, vol. 28, pp. 429-439, 2019." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 418, + 301, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 418, + 301, + 445 + ], + "spans": [ + { + "bbox": [ + 47, + 418, + 301, + 445 + ], + "type": "text", + "content": "[13] J. Yu, J. Wang, R. Xia, and J. Li, \"Targeted multimodal sentiment classification based on coarse-to-fine grained image-target matching.\" in *IJCAI*, 2022, pp. 4482-4488." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 446, + 301, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 446, + 301, + 482 + ], + "spans": [ + { + "bbox": [ + 47, + 446, + 301, + 482 + ], + "type": "text", + "content": "[14] Y. Ling, J. Yu, and R. Xia, \"Vision-language pre-training for multimodal aspect-based sentiment analysis,\" in Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2022, pp. 2149-2159." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 482, + 301, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 482, + 301, + 509 + ], + "spans": [ + { + "bbox": [ + 47, + 482, + 301, + 509 + ], + "type": "text", + "content": "[15] L. Yang, J.-C. Na, and J. 
Yu, \"Cross-modal multitask transformer for end-to-end multimodal aspect-based sentiment analysis,\" Information Processing & Management, vol. 59, no. 5, p. 103038, 2022." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 509, + 301, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 509, + 301, + 545 + ], + "spans": [ + { + "bbox": [ + 47, + 509, + 301, + 545 + ], + "type": "text", + "content": "[16] R. Zhou, W. Guo, X. Liu, S. Yu, Y. Zhang, and X. Yuan, \"Aom: Detecting aspect-oriented information for multimodal aspect-based sentiment analysis,\" in Findings of the Association for Computational Linguistics: ACL 2023, 2023, pp. 8184-8196." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 545, + 301, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 545, + 301, + 581 + ], + "spans": [ + { + "bbox": [ + 47, + 545, + 301, + 581 + ], + "type": "text", + "content": "[17] Z. Khan and Y. Fu, \"Exploiting bert for multimodal target sentiment classification through input space translation,\" in Proceedings of the 29th ACM international conference on multimedia, 2021, pp. 3034-3042." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 582, + 301, + 618 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 582, + 301, + 618 + ], + "spans": [ + { + "bbox": [ + 47, + 582, + 301, + 618 + ], + "type": "text", + "content": "[18] L. Xiao, E. Zhou, X. Wu, S. Yang, T. Ma, and L. He, \"Adaptive multi-feature extraction graph convolutional networks for multimodal target sentiment analysis,\" in 2022 IEEE International Conference on Multimedia and Expo (ICME). IEEE, 2022, pp. 1-6." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 618, + 301, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 618, + 301, + 654 + ], + "spans": [ + { + "bbox": [ + 47, + 618, + 301, + 654 + ], + "type": "text", + "content": "[19] L. Xiao, X. 
Wu, S. Yang, J. Xu, J. Zhou, and L. He, \"Cross-modal fine-grained alignment and fusion network for multimodal aspect-based sentiment analysis,\" Information Processing & Management, vol. 60, no. 6, p. 103508, 2023." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 655, + 301, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 655, + 301, + 691 + ], + "spans": [ + { + "bbox": [ + 47, + 655, + 301, + 691 + ], + "type": "text", + "content": "[20] Y. Huang, Z. Chen, J. Chen, J. Z. Pan, Z. Yao, and W. Zhang, \"Target-oriented sentiment classification with sequential cross-modal semantic graph,\" in International Conference on Artificial Neural Networks. Springer, 2023, pp. 587-599." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 47, + 691, + 301, + 727 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 691, + 301, + 727 + ], + "spans": [ + { + "bbox": [ + 47, + 691, + 301, + 727 + ], + "type": "text", + "content": "[21] Q. Wang, H. Xu, Z. Wen, B. Liang, M. Yang, B. Qin, and R. Xu, \"Image-to-text conversion and aspect-oriented filtration for multimodal aspect-based sentiment analysis,\" IEEE Transactions on Affective Computing, 2023." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 47, + 727, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 727, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 727, + 301, + 746 + ], + "type": "text", + "content": "[22] L. Xiao, X. Wu, J. Xu, W. Li, C. Jin, and L. 
He, \"Atlantis: Aesthetic-oriented multiple granularities fusion network for joint multi-" + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 44, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 44, + "blocks": [ + { + "bbox": [ + 328, + 44, + 564, + 63 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 44, + 564, + 63 + ], + "spans": [ + { + "bbox": [ + 328, + 44, + 564, + 63 + ], + "type": "text", + "content": "modal aspect-based sentiment analysis,\" Information Fusion, vol. 106, p. 102304, 2024." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 63, + 564, + 99 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 63, + 564, + 99 + ], + "spans": [ + { + "bbox": [ + 310, + 63, + 564, + 99 + ], + "type": "text", + "content": "[23] H. Yang, Y. Zhao, and B. Qin, \"Face-sensitive image-to-emotional-text cross-modal translation for multimodal aspect-based sentiment analysis,\" in Proceedings of the 2022 conference on empirical methods in natural language processing, 2022, pp. 3324-3335." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 100, + 564, + 136 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 100, + 564, + 136 + ], + "spans": [ + { + "bbox": [ + 310, + 100, + 564, + 136 + ], + "type": "text", + "content": "[24] R. Fan, T. He, M. Chen, M. Zhang, X. Tu, and M. Dong, \"Dual causes generation assisted model for multimodal aspect-based sentiment classification,\" IEEE Transactions on Neural Networks and Learning Systems, 2024." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 136, + 564, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 136, + 564, + 173 + ], + "spans": [ + { + "bbox": [ + 310, + 136, + 564, + 173 + ], + "type": "text", + "content": "[25] J. Wang, Z. Li, J. Yu, L. Yang, and R. 
Xia, \"Fine-grained multimodal named entity recognition and grounding with a generative framework,\" in Proceedings of the 31st ACM International Conference on Multimedia, 2023, pp. 3934-3943." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 174, + 564, + 210 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 174, + 564, + 210 + ], + "spans": [ + { + "bbox": [ + 310, + 174, + 564, + 210 + ], + "type": "text", + "content": "[26] X. Zhang, R. Mao, K. He, and E. Cambria, \"Neurosymbolic sentiment analysis with dynamic word sense disambiguation,\" in Findings of the Association for Computational Linguistics: EMNLP 2023, Singapore, 2023, pp. 8772-8783." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 210, + 564, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 210, + 564, + 246 + ], + "spans": [ + { + "bbox": [ + 310, + 210, + 564, + 246 + ], + "type": "text", + "content": "[27] Q. Lu, X. Sun, Y. Long, Z. Gao, J. Feng, and T. Sun, \"Sentiment analysis: Comprehensive reviews, recent advances, and open challenges,\" IEEE Transactions on Neural Networks and Learning Systems, 2023." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 247, + 564, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 247, + 564, + 282 + ], + "spans": [ + { + "bbox": [ + 310, + 247, + 564, + 282 + ], + "type": "text", + "content": "[28] H. Liu, W. Wang, and H. Li, \"Interpretable multimodal misinformation detection with logic reasoning,\" in Findings of the Association for Computational Linguistics: ACL 2023, 2023, pp. 9781-9796." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 284, + 564, + 321 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 284, + 564, + 321 + ], + "spans": [ + { + "bbox": [ + 310, + 284, + 564, + 321 + ], + "type": "text", + "content": "[29] R. Mao, K. Du, Y. Ma, L. Zhu, and E. 
Cambria, \"Discovering the cognition behind language: Financial metaphor analysis with MetaPro,\" in 2023 IEEE International Conference on Data Mining (ICDM). IEEE, 2023, pp. 1211-1216." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 321, + 564, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 321, + 564, + 367 + ], + "spans": [ + { + "bbox": [ + 310, + 321, + 564, + 367 + ], + "type": "text", + "content": "[30] E. Cambria, X. Zhang, R. Mao, M. Chen, and K. Kwok, \"SenticNet 8: Fusing emotion AI and commonsense AI for interpretable, trustworthy, and explainable affective computing,\" in Proceedings of International Conference on Human-Computer Interaction (HCI), Washington DC, USA, 2024, pp. 197-216." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 368, + 564, + 404 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 368, + 564, + 404 + ], + "spans": [ + { + "bbox": [ + 310, + 368, + 564, + 404 + ], + "type": "text", + "content": "[31] K. Du, R. Mao, F. Xing, and E. Cambria, \"Explainable stock price movement prediction using contrastive learning,\" in Proceedings of the 33rd ACM International Conference on Information and Knowledge Management (CIKM), Idaho, USA, 2024, pp. 529-537." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 404, + 564, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 404, + 564, + 432 + ], + "spans": [ + { + "bbox": [ + 310, + 404, + 564, + 432 + ], + "type": "text", + "content": "[32] H. Zhang, X. Zhou, Z. Shen, and Y. Li, \"Privfr: Privacy-enhanced federated recommendation with shared hash embedding,\" IEEE Transactions on Neural Networks and Learning Systems, 2024." 
+ } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 310, + 432, + 564, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 432, + 564, + 468 + ], + "spans": [ + { + "bbox": [ + 310, + 432, + 564, + 468 + ], + "type": "text", + "content": "[33] E. Yang, L. Shen, G. Guo, X. Wang, X. Cao, J. Zhang, and D. Tao, \"Model merging in llms, mllms, and beyond: Methods, theories, applications and opportunities,\" arXiv preprint arXiv:2408.07666, 2024." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 310, + 468, + 564, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 468, + 564, + 514 + ], + "spans": [ + { + "bbox": [ + 310, + 468, + 564, + 514 + ], + "type": "text", + "content": "[34] L. Xiao, R. Mao, X. Zhang, L. He, and E. Cambria, \"Vanessa: Visual connotation and aesthetic attributes understanding network for multimodal aspect-based sentiment analysis,\" in Findings of the Association for Computational Linguistics: EMNLP 2024, 2024, pp. 11486-11500." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 310, + 514, + 564, + 569 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 514, + 564, + 569 + ], + "spans": [ + { + "bbox": [ + 310, + 514, + 564, + 569 + ], + "type": "text", + "content": "[35] J. Kruk, J. Lubin, K. Sikka, X. Lin, D. Jurafsky, and A. Divakaran, \"Integrating text and image: Determining multimodal document intent in instagram posts,\" in Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), 2019, pp. 4622-4632." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 310, + 569, + 564, + 607 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 569, + 564, + 607 + ], + "spans": [ + { + "bbox": [ + 310, + 569, + 564, + 607 + ], + "type": "text", + "content": "[36] H. Liu, W. Wang, and H. 
Li, \"Towards multi-modal sarcasm detection via hierarchical congruity modeling with knowledge enhancement,\" in Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, 2022, pp. 4995-5006." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 310, + 607, + 564, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 607, + 564, + 643 + ], + "spans": [ + { + "bbox": [ + 310, + 607, + 564, + 643 + ], + "type": "text", + "content": "[37] R. Mao and X. Li, \"Bridging towers of multi-task learning with a gating mechanism for aspect-based sentiment analysis and sequential metaphor identification,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 35, 2021, pp. 13534-13542." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 310, + 643, + 564, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 643, + 564, + 672 + ], + "spans": [ + { + "bbox": [ + 310, + 643, + 564, + 672 + ], + "type": "text", + "content": "[38] T. Yue, R. Mao, H. Wang, Z. Hu, and E. Cambria, \"KnowleNet: Knowledge fusion network for multimodal sarcasm detection,\" Information Fusion, vol. 100, p. 101921, 2023." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 310, + 672, + 564, + 699 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 672, + 564, + 699 + ], + "spans": [ + { + "bbox": [ + 310, + 672, + 564, + 699 + ], + "type": "text", + "content": "[39] C. Fan, J. Lin, R. Mao, and E. Cambria, \"Fusing pairwise modalities for emotion recognition in conversations,\" Information Fusion, vol. 106, p. 102306, 2024." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 310, + 700, + 564, + 736 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 700, + 564, + 736 + ], + "spans": [ + { + "bbox": [ + 310, + 700, + 564, + 736 + ], + "type": "text", + "content": "[40] L. Yang, Z. Wang, Z. Li, J.-C. Na, and J. 
Yu, \"An empirical study of multimodal entity-based sentiment analysis with chatgpt: Improving in-context learning via entity-aware contrastive learning,\" Information Processing & Management, vol. 61, no. 4, p. 103724, 2024." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 310, + 736, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 736, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 736, + 564, + 746 + ], + "type": "text", + "content": "[41] L. Yang, J. Wang, J.-C. Na, and J. Yu, \"Generating paraphrase sen" + } + ] + } + ], + "index": 43 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 46, + 43, + 301, + 746 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 63, + 43, + 301, + 63 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 43, + 301, + 63 + ], + "spans": [ + { + "bbox": [ + 63, + 43, + 301, + 63 + ], + "type": "text", + "content": "tences for multimodal entity-category-sentiment triple extraction,\" Knowledge-Based Systems, vol. 278, p. 110823, 2023." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 46, + 63, + 301, + 91 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 63, + 301, + 91 + ], + "spans": [ + { + "bbox": [ + 46, + 63, + 301, + 91 + ], + "type": "text", + "content": "[42] J. Zhou, J. Zhao, J. X. Huang, Q. V. Hu, and L. He, \"Masad: A large-scale dataset for multimodal aspect-based sentiment analysis,\" Neurocomputing, vol. 455, pp. 47-58, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 91, + 301, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 91, + 301, + 129 + ], + "spans": [ + { + "bbox": [ + 47, + 91, + 301, + 129 + ], + "type": "text", + "content": "[43] W. Zhang, X. Li, Y. Deng, L. Bing, and W. Lam, \"A survey on aspect-based sentiment analysis: Tasks, methods, and challenges,\" IEEE Transactions on Knowledge and Data Engineering, vol. 35, no. 11, pp. 11019-11038, 2022." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 129, + 301, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 129, + 301, + 166 + ], + "spans": [ + { + "bbox": [ + 47, + 129, + 301, + 166 + ], + "type": "text", + "content": "[44] X. Ju, D. Zhang, R. Xiao, J. Li, S. Li, M. Zhang, and G. Zhou, \"Joint multi-modal aspect-sentiment analysis with auxiliary cross-modal relation detection,\" in Proceedings of the 2021 conference on empirical methods in natural language processing, 2021, pp. 4395-4405." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 166, + 301, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 166, + 301, + 203 + ], + "spans": [ + { + "bbox": [ + 47, + 166, + 301, + 203 + ], + "type": "text", + "content": "[45] J. Mu, F. Nie, W. Wang, J. Xu, J. Zhang, and H. Liu, \"Mocolnet: A momentum contrastive learning network for multimodal aspect-level sentiment analysis,\" IEEE Transactions on Knowledge and Data Engineering, 2023." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 203, + 301, + 249 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 203, + 301, + 249 + ], + "spans": [ + { + "bbox": [ + 47, + 203, + 301, + 249 + ], + "type": "text", + "content": "[46] F. Zhao, C. Li, Z. Wu, Y. Ouyang, J. Zhang, and X. Dai, \"M2df: Multi-grained multi-curriculum denoising framework for multimodal aspect-based sentiment analysis,\" in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, 2023, pp. 9057-9070." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 47, + 250, + 301, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 250, + 301, + 278 + ], + "spans": [ + { + "bbox": [ + 47, + 250, + 301, + 278 + ], + "type": "text", + "content": "[47] E. Cambria, R. Mao, M. Chen, Z. Wang, and S.-B. Ho, \"Seven pillars for the future of artificial intelligence,\" IEEE Intelligent Systems, vol. 38, no. 6, pp. 62-69, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 278, + 301, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 278, + 301, + 297 + ], + "spans": [ + { + "bbox": [ + 47, + 278, + 301, + 297 + ], + "type": "text", + "content": "[48] R. Arnheim, Art and visual perception: A psychology of the creative eye. Univ of California Press, 1954." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 47, + 297, + 301, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 297, + 301, + 326 + ], + "spans": [ + { + "bbox": [ + 47, + 297, + 301, + 326 + ], + "type": "text", + "content": "[49] V. S. Ramachandran and W. Hirstein, \"The science of art: A neurological theory of aesthetic experience,\" Journal of Consciousness Studies, vol. 6, no. 6-7, pp. 15-51, 1999." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 47, + 326, + 301, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 326, + 301, + 354 + ], + "spans": [ + { + "bbox": [ + 47, + 326, + 301, + 354 + ], + "type": "text", + "content": "[50] H. Zeng, Z. Cao, L. Zhang, and A. C. Bovik, \"A unified probabilistic formulation of image aesthetic assessment,\" IEEE Transactions on Image Processing, vol. 29, pp. 1548-1561, 2019." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 47, + 354, + 301, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 354, + 301, + 381 + ], + "spans": [ + { + "bbox": [ + 47, + 354, + 301, + 381 + ], + "type": "text", + "content": "[51] G. C. Cupchik and J. László, Emerging visions of the aesthetic process: In psychology, semiology, and philosophy. Cambridge University Press, 1992." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 47, + 382, + 301, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 382, + 301, + 419 + ], + "spans": [ + { + "bbox": [ + 47, + 382, + 301, + 419 + ], + "type": "text", + "content": "[52] X. Jin, L. Wu, G. Zhao, X. Li, X. Zhang, S. Ge, D. Zou, B. Zhou, and X. Zhou, \"Aesthetic attributes assessment of images,\" in Proceedings of the 27th ACM international conference on multimedia, 2019, pp. 311-319." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 47, + 419, + 301, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 419, + 301, + 456 + ], + "spans": [ + { + "bbox": [ + 47, + 419, + 301, + 456 + ], + "type": "text", + "content": "[53] J. Ke, K. Ye, J. Yu, Y. Wu, P. Milanfar, and F. Yang, \"Vila: Learning image aesthetics from user comments with vision-language pretraining,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 10041-10051." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 47, + 456, + 301, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 456, + 301, + 495 + ], + "spans": [ + { + "bbox": [ + 47, + 456, + 301, + 495 + ], + "type": "text", + "content": "[54] J. Kruk, C. Ziems, and D. Yang, \"Impressions: Visual semiotics and aesthetic impact understanding,\" in Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, 2023, pp. 12273-12291." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 47, + 495, + 301, + 522 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 495, + 301, + 522 + ], + "spans": [ + { + "bbox": [ + 47, + 495, + 301, + 522 + ], + "type": "text", + "content": "[55] R. Anil, A. M. Dai, O. First, M. Johnson, D. Lepikhin, A. Passos, S. Shakeri, E. Taropa, P. Bailey, Z. Chen et al., \"Palm 2 technical report,\" arXiv preprint arXiv:2305.10403, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 47, + 522, + 301, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 522, + 301, + 568 + ], + "spans": [ + { + "bbox": [ + 47, + 522, + 301, + 568 + ], + "type": "text", + "content": "[56] R. Mao, G. Chen, X. Zhang, F. Guerin, and E. Cambria, \"GPTEval: A survey on assessments of ChatGPT and GPT-4,\" in Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024). ELRA and ICCL, 2024, pp. 7844-7866." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 47, + 568, + 301, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 568, + 301, + 605 + ], + "spans": [ + { + "bbox": [ + 47, + 568, + 301, + 605 + ], + "type": "text", + "content": "[57] S. Zhao, L. A. Tuan, J. Fu, J. Wen, and W. Luo, \"Exploring clean label backdoor attacks and defense in language models,\" IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2024." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 47, + 605, + 301, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 605, + 301, + 643 + ], + "spans": [ + { + "bbox": [ + 47, + 605, + 301, + 643 + ], + "type": "text", + "content": "[58] S. Zhao, X. Xu, L. Xiao, J. Wen, and L. A. Tuan, \"Clean-label backdoor attack and defense: An examination of language model vulnerability,\" Expert Systems with Applications, vol. 265, p. 125856, 2025." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 47, + 643, + 301, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 643, + 301, + 672 + ], + "spans": [ + { + "bbox": [ + 47, + 643, + 301, + 672 + ], + "type": "text", + "content": "[59] J. Achiam, S. Adler, S. Agarwal, L. Ahmad, I. Akkaya, F. L. Aleman, D. Almeida, J. Altenschmidt, S. Altman, S. Anadkat et al., \"Gpt-4 technical report,\" arXiv preprint arXiv:2303.08774, 2023." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 47, + 672, + 301, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 672, + 301, + 708 + ], + "spans": [ + { + "bbox": [ + 47, + 672, + 301, + 708 + ], + "type": "text", + "content": "[60] G. Team, R. Anil, S. Borgeaud, J.-B. Alayrac, J. Yu, R. Soricut, J. Schalkwyk, A. M. Dai, A. Hauth, K. Millican et al., \"Gemini: a family of highly capable multimodal models,\" arXiv preprint arXiv:2312.11805, 2023." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 47, + 708, + 301, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 708, + 301, + 746 + ], + "spans": [ + { + "bbox": [ + 47, + 708, + 301, + 746 + ], + "type": "text", + "content": "[61] H. Touvron, L. Martin, K. Stone, P. Albert, A. Almahairi, Y. Babaei, N. Bashlykov, S. Batra, P. Bhargava, S. Bhosale et al., \"Llama 2: Open foundation and fine-tuned chat models,\" arXiv preprint arXiv:2307.09288, 2023." 
+ } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 310, + 44, + 564, + 746 + ], + "type": "list", + "angle": 0, + "index": 45, + "blocks": [ + { + "bbox": [ + 310, + 44, + 564, + 72 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 44, + 564, + 72 + ], + "spans": [ + { + "bbox": [ + 310, + 44, + 564, + 72 + ], + "type": "text", + "content": "[62] H. Liu, W. Wang, H. Sun, A. Rocha, and H. Li, \"Robust domain misinformation detection via multi-modal feature alignment,\" IEEE Transactions on Information Forensics and Security, 2023." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 310, + 72, + 564, + 108 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 72, + 564, + 108 + ], + "spans": [ + { + "bbox": [ + 310, + 72, + 564, + 108 + ], + "type": "text", + "content": "[63] R. Mao, K. He, C. Ong, Q. Liu, and E. Cambria, “Metapro 2.0: Computational metaphor processing on the effectiveness of anomalous language modeling,” in Findings of the Association for Computational Linguistics ACL 2024, 2024, pp. 9891–9908." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 310, + 108, + 564, + 136 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 108, + 564, + 136 + ], + "spans": [ + { + "bbox": [ + 310, + 108, + 564, + 136 + ], + "type": "text", + "content": "[64] Z. Tan, D. Li, S. Wang, A. Beigi, B. Jiang, A. Bhattacharjee, M. Karami, J. Li, L. Cheng, and H. Liu, \"Large language models for data annotation: A survey,\" arXiv preprint arXiv:2402.13446, 2024." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 310, + 136, + 564, + 164 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 136, + 564, + 164 + ], + "spans": [ + { + "bbox": [ + 310, + 136, + 564, + 164 + ], + "type": "text", + "content": "[65] R. Mao, G. Chen, X. Li, M. Ge, and E. 
Cambria, \"A comparative analysis of metaphorical cognition in chatgpt and human minds,\" Cognitive Computation, vol. 17, no. 1, p. 35, 2025." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 310, + 164, + 564, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 164, + 564, + 191 + ], + "spans": [ + { + "bbox": [ + 310, + 164, + 564, + 191 + ], + "type": "text", + "content": "[66] Y. Jia, X. Wu, H. Li, Q. Zhang, Y. Hu, S. Zhao, and W. Fan, \"Uni-retrieval: A multi-style retrieval framework for stem's education,\" arXiv preprint arXiv:2502.05863, 2025." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 310, + 191, + 564, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 191, + 564, + 229 + ], + "spans": [ + { + "bbox": [ + 310, + 191, + 564, + 229 + ], + "type": "text", + "content": "[67] J. Wei, X. Wang, D. Schuurmans, M. Bosma, F. Xia, E. Chi, Q. V. Le, D. Zhou et al., \"Chain-of-thought prompting elicits reasoning in large language models,\" Advances in neural information processing systems, vol. 35, pp. 24824-24837, 2022." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 310, + 229, + 564, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 229, + 564, + 266 + ], + "spans": [ + { + "bbox": [ + 310, + 229, + 564, + 266 + ], + "type": "text", + "content": "[68] K. Cobbe, V. Kosaraju, M. Bavarian, M. Chen, H. Jun, L. Kaiser, M. Plappert, J. Tworek, J. Hilton, R. Nakano et al., \"Training verifiers to solve math word problems,\" arXiv preprint arXiv:2110.14168, 2021." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 310, + 266, + 564, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 266, + 564, + 293 + ], + "spans": [ + { + "bbox": [ + 310, + 266, + 564, + 293 + ], + "type": "text", + "content": "[69] P. Wang, A. Chan, F. Ilievski, M. Chen, and X. 
Ren, \"Pinto: Faithful language reasoning using prompt-generated rationales,\" in The Eleventh International Conference on Learning Representations, 2023." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 310, + 293, + 564, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 293, + 564, + 331 + ], + "spans": [ + { + "bbox": [ + 310, + 293, + 564, + 331 + ], + "type": "text", + "content": "[70] P. Wang, Z. Wang, Z. Li, Y. Gao, B. Yin, and X. Ren, \"Scott: Self-consistent chain-of-thought distillation,\" in Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2023, pp. 5546-5558." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 310, + 331, + 564, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 331, + 564, + 358 + ], + "spans": [ + { + "bbox": [ + 310, + 331, + 564, + 358 + ], + "type": "text", + "content": "[71] H. Liu, Z. Teng, L. Cui, C. Zhang, Q. Zhou, and Y. Zhang, \"Logicot: Logical chain-of-thought instruction tuning,\" in The 2023 Conference on Empirical Methods in Natural Language Processing, 2023." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 310, + 358, + 564, + 395 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 358, + 564, + 395 + ], + "spans": [ + { + "bbox": [ + 310, + 358, + 564, + 395 + ], + "type": "text", + "content": "[72] M. Kang, S. Lee, J. Baek, K. Kawaguchi, and S. J. Hwang, \"Knowledge-augmented reasoning distillation for small language models in knowledge-intensive tasks,\" Advances in Neural Information Processing Systems, vol. 36, 2024." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 310, + 395, + 564, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 395, + 564, + 423 + ], + "spans": [ + { + "bbox": [ + 310, + 395, + 564, + 423 + ], + "type": "text", + "content": "[73] Y. Li, A. Dao, W. Bao, Z. Tan, T. Chen, H. 
Liu, and Y. Kong, \"Facial affective behavior analysis with instruction tuning,\" in European Conference on Computer Vision. Springer, 2025, pp. 165-186." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 310, + 423, + 564, + 451 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 423, + 564, + 451 + ], + "spans": [ + { + "bbox": [ + 310, + 423, + 564, + 451 + ], + "type": "text", + "content": "[74] J. Guo, J. Deng, A. Lattas, and S. Zafeiriou, \"Sample and computation redistribution for efficient face detection,\" in International Conference on Learning Representations, 2021." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 310, + 451, + 564, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 451, + 564, + 497 + ], + "spans": [ + { + "bbox": [ + 310, + 451, + 564, + 497 + ], + "type": "text", + "content": "[75] S. Wegreffer, J. Hessel, S. Swayamdipta, M. Riedl, and Y. Choi, \"Reframing human-ai collaboration for generating free-text explanations,\" in Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, 2022, pp. 632–658." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 310, + 497, + 564, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 497, + 564, + 534 + ], + "spans": [ + { + "bbox": [ + 310, + 497, + 564, + 534 + ], + "type": "text", + "content": "[76] L. Meng, H. Li, B.-C. Chen, S. Lan, Z. Wu, Y.-G. Jiang, and S.-N. Lim, \"Adavit: Adaptive vision transformers for efficient image recognition,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 12309-12318." 
+ } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 310, + 534, + 564, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 534, + 564, + 570 + ], + "spans": [ + { + "bbox": [ + 310, + 534, + 564, + 570 + ], + "type": "text", + "content": "[77] Z. Fu, L. Zhang, H. Xia, and Z. Mao, \"Linguistic-aware patch slimming framework for fine-grained cross-modal alignment,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 26307-26316." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 310, + 571, + 564, + 607 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 571, + 564, + 607 + ], + "spans": [ + { + "bbox": [ + 310, + 571, + 564, + 607 + ], + "type": "text", + "content": "[78] C. Maddison, A. Mnih, and Y. Teh, \"The concrete distribution: A continuous relaxation of discrete random variables,\" in Proceedings of the international conference on Learning Representations. International Conference on Learning Representations, 2017." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 310, + 607, + 564, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 607, + 564, + 635 + ], + "spans": [ + { + "bbox": [ + 310, + 607, + 564, + 635 + ], + "type": "text", + "content": "[79] Z. Zong, K. Li, G. Song, Y. Wang, Y. Qiao, B. Leng, and Y. Liu, \"Self-slimmed vision transformer,\" in European Conference on Computer Vision. Springer, 2022, pp. 432-448." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 310, + 635, + 564, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 635, + 564, + 662 + ], + "spans": [ + { + "bbox": [ + 310, + 635, + 564, + 662 + ], + "type": "text", + "content": "[80] F. Faghri, D. J. Fleet, J. R. Kiros, and S. Fidler, \"Vse++: Improving visual-semantic embeddings with hard negatives,\" arXiv preprint arXiv:1707.05612, 2017." 
+ } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 310, + 663, + 564, + 709 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 663, + 564, + 709 + ], + "spans": [ + { + "bbox": [ + 310, + 663, + 564, + 709 + ], + "type": "text", + "content": "[81] L. Yang, J. Yu, C. Zhang, and J.-C. Na, \"Fine-grained sentiment analysis of political tweets with entity-aware multimodal network,\" in Diversity, Divergence, Dialogue: 16th International Conference, iConference 2021, Beijing, China, March 17–31, 2021, Proceedings, Part I 16. Springer, 2021, pp. 411–420." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 310, + 709, + 564, + 746 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 709, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 310, + 709, + 564, + 746 + ], + "type": "text", + "content": "[82] H. W. Chung, L. Hou, S. Longpre, B. Zoph, Y. Tay, W. Fedus, Y. Li, X. Wang, M. Dehghani, S. Brahma et al., \"Scaling instructionfinetuned language models,\" Journal of Machine Learning Research, vol. 25, no. 70, pp. 1-53, 2024." + } + ] + } + ], + "index": 44 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. 
XX, XXXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 45, + 43, + 301, + 450 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 45, + 43, + 301, + 62 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 43, + 301, + 62 + ], + "spans": [ + { + "bbox": [ + 45, + 43, + 301, + 62 + ], + "type": "text", + "content": "[83] I. Loshchilov, \"Decoupled weight decay regularization,\" arXiv preprint arXiv:1711.05101, 2017." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 45, + 62, + 301, + 89 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 62, + 301, + 89 + ], + "spans": [ + { + "bbox": [ + 45, + 62, + 301, + 89 + ], + "type": "text", + "content": "[84] K. He, X. Zhang, S. Ren, and J. Sun, \"Deep residual learning for image recognition,\" in Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 770-778." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 46, + 89, + 301, + 125 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 89, + 301, + 125 + ], + "spans": [ + { + "bbox": [ + 46, + 89, + 301, + 125 + ], + "type": "text", + "content": "[85] D. Tang, B. Qin, and T. Liu, \"Aspect level sentiment classification with deep memory network,\" in Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, 2016, pp. 214-224." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 46, + 125, + 301, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 125, + 301, + 161 + ], + "spans": [ + { + "bbox": [ + 46, + 125, + 301, + 161 + ], + "type": "text", + "content": "[86] F. Fan, Y. Feng, and D. Zhao, \"Multi-grained attention network for aspect-level sentiment classification,\" in Proceedings of the 2018 conference on empirical methods in natural language processing, 2018, pp. 3433-3442." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 46, + 161, + 301, + 197 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 161, + 301, + 197 + ], + "spans": [ + { + "bbox": [ + 46, + 161, + 301, + 197 + ], + "type": "text", + "content": "[87] J. D. M.-W. C. Kenton and L. K. Toutanova, \"Bert: Pre-training of deep bidirectional transformers for language understanding,\" in Proceedings of naacL-HLT, vol. 1. Minneapolis, Minnesota, 2019, p. 2." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 46, + 197, + 301, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 197, + 301, + 233 + ], + "spans": [ + { + "bbox": [ + 46, + 197, + 301, + 233 + ], + "type": "text", + "content": "[88] N. Xu, W. Mao, and G. Chen, \"Multi-interactive memory network for aspect based multimodal sentiment analysis,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 33, 2019, pp. 371-378." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 46, + 233, + 301, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 233, + 301, + 269 + ], + "spans": [ + { + "bbox": [ + 46, + 233, + 301, + 269 + ], + "type": "text", + "content": "[89] J. Yu, K. Chen, and R. Xia, \"Hierarchical interactive multimodal transformer for aspect-based multimodal sentiment analysis,\" IEEE Transactions on Affective Computing, vol. 14, no. 3, pp. 1966-1978, 2022." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 46, + 269, + 301, + 305 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 269, + 301, + 305 + ], + "spans": [ + { + "bbox": [ + 46, + 269, + 301, + 305 + ], + "type": "text", + "content": "[90] D. Liu, L. Li, X. Tao, J. Cui, and Q. Xie, \"Descriptive prompt paraphrasing for target-oriented multimodal sentiment classification,\" in Findings of the Association for Computational Linguistics: EMNLP 2023, 2023, pp. 4174-4186." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 46, + 305, + 301, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 305, + 301, + 342 + ], + "spans": [ + { + "bbox": [ + 46, + 305, + 301, + 342 + ], + "type": "text", + "content": "[91] B. Yang and J. Li, \"Visual elements mining as prompts for instruction learning for target-oriented multimodal sentiment classification,\" in Findings of the Association for Computational Linguistics: EMNLP 2023, 2023, pp. 6062-6075." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 46, + 342, + 301, + 394 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 342, + 301, + 394 + ], + "spans": [ + { + "bbox": [ + 46, + 342, + 301, + 394 + ], + "type": "text", + "content": "[92] J. Camacho-Collados, K. Rezaee, T. Riahi, A. Ushio, D. Loureiro, D. Antypas, J. Boisson, L. E. Anke, F. Liu, and E. Martinez-Camara, \"Tweetnlp: Cutting-edge natural language processing for social media,\" in Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, 2022, pp. 38-49." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 46, + 395, + 301, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 395, + 301, + 432 + ], + "spans": [ + { + "bbox": [ + 46, + 395, + 301, + 432 + ], + "type": "text", + "content": "[93] J. Ye, J. Zhou, J. Tian, R. Wang, Q. Zhang, T. Gui, and X.-J. 
Huang, \"Rethinkingtmsc: An empirical study for target-oriented multimodal sentiment classification,\" in Findings of the Association for Computational Linguistics: EMNLP 2023, 2023, pp. 270-277." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 46, + 432, + 301, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 46, + 432, + 301, + 450 + ], + "spans": [ + { + "bbox": [ + 46, + 432, + 301, + 450 + ], + "type": "text", + "content": "[94] M. Ivanova and S. French, The aesthetics of science: beauty, imagination and understanding. Routledge, 2020." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "type": "image", + "bbox": [ + 46, + 472, + 121, + 545 + ], + "blocks": [ + { + "bbox": [ + 46, + 472, + 121, + 545 + ], + "lines": [ + { + "bbox": [ + 46, + 472, + 121, + 545 + ], + "spans": [ + { + "bbox": [ + 46, + 472, + 121, + 545 + ], + "type": "image", + "image_path": "b2455a823d706a1fb297782d965eb4fc8120cd085b7a9a145704ebd539f3a434.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 127, + 460, + 301, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 460, + 301, + 560 + ], + "spans": [ + { + "bbox": [ + 127, + 460, + 301, + 560 + ], + "type": "text", + "content": "Luwei Xiao is currently pursuing his Ph.D. degree in the School of Computer Science and Technology at East China Normal University, Shanghai, China, under the supervision of Prof. Liang He. He is presently conducting an academic visit to the College of Computing and Data Science at Nanyang Technological University, Singapore, under the supervision of Prof. Erik Cambria, with funding support from the China Scholarship Council (CSC). 
His research interests encompass multimodal learning, semi-" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 45, + 560, + 220, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 560, + 220, + 570 + ], + "spans": [ + { + "bbox": [ + 45, + 560, + 220, + 570 + ], + "type": "text", + "content": "ment analysis, and image aesthetic assessment." + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 48, + 608, + 119, + 700 + ], + "blocks": [ + { + "bbox": [ + 48, + 608, + 119, + 700 + ], + "lines": [ + { + "bbox": [ + 48, + 608, + 119, + 700 + ], + "spans": [ + { + "bbox": [ + 48, + 608, + 119, + 700 + ], + "type": "image", + "image_path": "590aa9d63be2eebe6cfa3158c9b043df2309dd5b1ad1248286a4339f6c939ac7.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "bbox": [ + 127, + 607, + 301, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 607, + 301, + 707 + ], + "spans": [ + { + "bbox": [ + 127, + 607, + 301, + 707 + ], + "type": "text", + "content": "Rui Mao is a Research Scientist and Lead Investigator at Nanyang Technological University. He obtained his Ph.D. degree in Computing Science from the University of Aberdeen. His research interest lies in NLP, cognitive computing, and their applications in finance and cognitive science. He and his funded company (Ruimao Tech) have developed an end-to-end system (MetaPro) for computational metaphor processing and a neural search engine (wensousou.com) for searching Chinese ancient po" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 45, + 707, + 301, + 744 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 707, + 301, + 744 + ], + "spans": [ + { + "bbox": [ + 45, + 707, + 301, + 744 + ], + "type": "text", + "content": "ems with modern language. 
He served as Area Chair in COLING and EMNLP and Associate Editor in IEEE Transactions on Affective Computing, Expert Systems, Information Fusion and Neurocomputing. Contact him at rui.mao@ntu.edu.sg." + } + ] + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 310, + 50, + 383, + 136 + ], + "blocks": [ + { + "bbox": [ + 310, + 50, + 383, + 136 + ], + "lines": [ + { + "bbox": [ + 310, + 50, + 383, + 136 + ], + "spans": [ + { + "bbox": [ + 310, + 50, + 383, + 136 + ], + "type": "image", + "image_path": "c68014cf5101a6d4a08998285ee4b085ef6bfd6d34aee5cfdc345bad0334cc9f.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 142, + 332, + 152 + ], + "lines": [ + { + "bbox": [ + 310, + 142, + 332, + 152 + ], + "spans": [ + { + "bbox": [ + 310, + 142, + 332, + 152 + ], + "type": "text", + "content": "tacks." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + }, + { + "bbox": [ + 391, + 43, + 564, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 391, + 43, + 564, + 144 + ], + "spans": [ + { + "bbox": [ + 391, + 43, + 564, + 144 + ], + "type": "text", + "content": "Shuai Zhao obtained his Ph.D. degree from Jinan University in 2024. He spent one year as a visiting student and six months as a research assistant at the School of Computer Science and Engineering, Nanyang Technological University. He is now a Postdoctoral Researcher at the College of Computing and Data Science, Nanyang Technological University. 
His current research interests include deep learning and natural language processing for code generation, summary generation, text classification and backdoor at" + } + ] + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 312, + 189, + 382, + 281 + ], + "blocks": [ + { + "bbox": [ + 312, + 189, + 382, + 281 + ], + "lines": [ + { + "bbox": [ + 312, + 189, + 382, + 281 + ], + "spans": [ + { + "bbox": [ + 312, + 189, + 382, + 281 + ], + "type": "image", + "image_path": "408bcb009bb136bc756c3feeeae37046041ca6f53f35e217e619ad31c595a06a.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "bbox": [ + 391, + 188, + 564, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 391, + 188, + 564, + 289 + ], + "spans": [ + { + "bbox": [ + 391, + 188, + 564, + 289 + ], + "type": "text", + "content": "Qika Lin received his Ph.D. degree at Xi'an Jiaotong University. Currently, he is a Research Fellow at the National University of Singapore. His research interests include natural language processing, knowledge reasoning, and multimodal learning. He has published papers in top-tier journals/conferences, including TKDE, ACL, SIGIR, KDD, ICDE, and IJCAI. He has actively contributed to several journals/conferences as a reviewer or PC member, including TPAMI, IJCV, TKDE, TMC, TNNLS, NeurIPS, ICLR, SIGIR," + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 309, + 289, + 564, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 289, + 564, + 306 + ], + "spans": [ + { + "bbox": [ + 309, + 289, + 564, + 306 + ], + "type": "text", + "content": "ACL, and EMNLP. He also served as a Guest Editor of IEEE TCSS and Information Fusion." 
+ } + ] + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 310, + 344, + 384, + 435 + ], + "blocks": [ + { + "bbox": [ + 310, + 344, + 384, + 435 + ], + "lines": [ + { + "bbox": [ + 310, + 344, + 384, + 435 + ], + "spans": [ + { + "bbox": [ + 310, + 344, + 384, + 435 + ], + "type": "image", + "image_path": "4781794a3325adb4297715bb796cb377152ae80fbf054d40217e4712f2292d98.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "bbox": [ + 391, + 342, + 564, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 391, + 342, + 564, + 407 + ], + "spans": [ + { + "bbox": [ + 391, + 342, + 564, + 407 + ], + "type": "text", + "content": "Yanhao Jia is a phd student at Nanyang Technological University. He obtained his bechealor degree in Computing Science from Shandong University. He has published over seven conference/journal papers on ECCV/NeurIPS/IEEE Trans on nuclear science and been the reviewer for ACM MM and ECCV." + } + ] + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 316, + 472, + 378, + 563 + ], + "blocks": [ + { + "bbox": [ + 316, + 472, + 378, + 563 + ], + "lines": [ + { + "bbox": [ + 316, + 472, + 378, + 563 + ], + "spans": [ + { + "bbox": [ + 316, + 472, + 378, + 563 + ], + "type": "image", + "image_path": "055ce189bbd92010fabe2e02ed9ab7e4fe8376936fe577fb386be48061eb9c9e.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "bbox": [ + 391, + 470, + 564, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 391, + 470, + 564, + 553 + ], + "spans": [ + { + "bbox": [ + 391, + 470, + 564, + 553 + ], + "type": "text", + "content": "Liang He received his PhD degree from the Department of Computer Science and Technology, East China Normal University, China. He is now a professor and the Vice Dean of the School of Computer Science and Technology, East China Normal University. 
His current research interest includes Natural Language Processing, Knowledge Processing, and Human in the Loop for Decision-making." + } + ] + } + ], + "index": 30 + }, + { + "type": "image", + "bbox": [ + 310, + 600, + 384, + 693 + ], + "blocks": [ + { + "bbox": [ + 310, + 600, + 384, + 693 + ], + "lines": [ + { + "bbox": [ + 310, + 600, + 384, + 693 + ], + "spans": [ + { + "bbox": [ + 310, + 600, + 384, + 693 + ], + "type": "image", + "image_path": "abc09142c4768e0fb2c6c4106a4e36deaa52ea49863b3bb59a2135f5bffcfe98.jpg" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_body" + } + ], + "index": 31 + }, + { + "bbox": [ + 391, + 599, + 564, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 391, + 599, + 564, + 700 + ], + "spans": [ + { + "bbox": [ + 391, + 599, + 564, + 700 + ], + "type": "text", + "content": "Erik Cambria is a Professor at Nanyang Technological University, where he also holds the appointment of Provost Chair in Computer Science and Engineering, and Founder of several AI companies, such as SenticNet, offering B2B sentiment analysis services, and finaXai, providing fully explainable financial insights. His research focuses on neurosymbolic AI for interpretable, trustworthy, and explainable affective computing in domains like social media monitoring, financial forecasting, and AI for social" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 309, + 700, + 564, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 309, + 700, + 564, + 746 + ], + "spans": [ + { + "bbox": [ + 309, + 700, + 564, + 746 + ], + "type": "text", + "content": "good. He is an IEEE Fellow, Associate Editor of various top-tier AI journals, e.g., Information Fusion and IEEE Transactions on Affective Computing, and is involved in several international conferences as keynote speaker, program chair and committee member. Contact him at cambria@ntu.edu.sg." 
+ } + ] + } + ], + "index": 33 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "spans": [ + { + "bbox": [ + 45, + 26, + 317, + 35 + ], + "type": "text", + "content": "IEEE TRANSACTIONS ON AFFECTIVE COMPUTING, VOL. XX, NO. XX, XXXX 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "spans": [ + { + "bbox": [ + 555, + 26, + 564, + 34 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15909/41ce0363-307a-4461-bbaf-6fdf5036b2e7_content_list.json b/data/2025/2504_15xxx/2504.15909/41ce0363-307a-4461-bbaf-6fdf5036b2e7_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..949d3af82969443829d8a4998095f250a0cdc4e9 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/41ce0363-307a-4461-bbaf-6fdf5036b2e7_content_list.json @@ -0,0 +1,5895 @@ +[ + { + "type": "text", + "text": "Synergizing RAG and Reasoning: A Systematic Review", + "text_level": 1, + "bbox": [ + 83, + 87, + 913, + 118 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yunfan Gao", + "bbox": [ + 181, + 128, + 279, + 143 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shanghai Research Institute for", + "bbox": [ + 122, + 145, + 339, + 159 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Intelligent Autonomous Systems,", + "bbox": [ + 119, + 161, + 344, + 175 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tongji University", + "bbox": [ + 171, + 176, + 290, + 190 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "China", + "bbox": [ + 209, + 191, + 251, + 203 + ], + 
"page_idx": 0 + }, + { + "type": "text", + "text": "gaoyunfan1602@gmail.com", + "bbox": [ + 135, + 205, + 323, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yuxi Bi", + "bbox": [ + 199, + 232, + 261, + 247 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "College of Design and Innovation,", + "bbox": [ + 116, + 250, + 346, + 263 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tongji University", + "bbox": [ + 171, + 263, + 289, + 279 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "China", + "bbox": [ + 209, + 280, + 251, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "yuxibi@gmail.com", + "bbox": [ + 166, + 295, + 294, + 309 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yun Xiong", + "bbox": [ + 454, + 128, + 542, + 145 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shanghai Key Laboratory of Data", + "bbox": [ + 387, + 146, + 611, + 160 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Science, School of Computer Science,", + "bbox": [ + 374, + 161, + 624, + 175 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Fudan University", + "bbox": [ + 439, + 176, + 557, + 189 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "China", + "bbox": [ + 475, + 191, + 519, + 203 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "yunx@fudan.edu.cn", + "bbox": [ + 429, + 205, + 566, + 220 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ming Xue", + "bbox": [ + 457, + 232, + 539, + 248 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Percena AI", + "bbox": [ + 460, + 250, + 535, + 262 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "China", + "bbox": [ + 477, + 265, + 519, + 277 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "mxue@percena.co", + "bbox": [ + 436, + 280, + 560, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yijie Zhong", + "bbox": [ + 718, + 128, + 813, + 145 + ], + "page_idx": 0 + }, 
+ { + "type": "text", + "text": "College of Design and Innovation,", + "bbox": [ + 653, + 146, + 882, + 160 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tongji University", + "bbox": [ + 707, + 161, + 826, + 175 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "China", + "bbox": [ + 745, + 176, + 787, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "dun.haski@gmail.com", + "bbox": [ + 691, + 190, + 841, + 205 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Haofen Wang*", + "bbox": [ + 707, + 232, + 825, + 250 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "College of Design and Innovation,", + "bbox": [ + 653, + 250, + 880, + 263 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tongji University", + "bbox": [ + 707, + 263, + 825, + 279 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "China", + "bbox": [ + 745, + 280, + 787, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "carter.whfcarter@gmail.com", + "bbox": [ + 669, + 295, + 862, + 309 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 83, + 319, + 163, + 333 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent breakthroughs in large language models (LLMs), particularly in reasoning capabilities, have propelled Retrieval-Augmented Generation (RAG) to unprecedented levels. By synergizing retrieval mechanisms with advanced reasoning, LLMs can now tackle increasingly complex problems. This paper presents a systematic review of the collaborative interplay between RAG and reasoning, clearly defining \"reasoning\" within the RAG context. It construct a comprehensive taxonomy encompassing multi-dimensional collaborative objectives, representative paradigms, and technical implementations, and analyze the bidirectional synergy methods. 
Additionally, we critically evaluate current limitations in RAG assessment, including the absence of intermediate supervision for multi-step reasoning and practical challenges related to cost-risk trade-offs. To bridge theory and practice, we provide practical guidelines tailored to diverse real-world applications. Finally, we identify promising research directions, such as graph-based knowledge integration, hybrid model collaboration, and RL-driven optimization. Overall, this work presents a theoretical framework and practical foundation to advance RAG systems in academia and industry, fostering the next generation of RAG solutions.", + "bbox": [ + 81, + 338, + 483, + 672 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 84, + 696, + 230, + 710 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent breakthroughs in Large Language Models (LLMs) like OpenAI O1 [39] and DeepSeek-R1 [25] have shifted the paradigm from \"pre-training scaling\" to \"test-time scaling\" [63]. Unlike traditional language models that improve via corpus accumulation during pre-training, these models enhance performance in complex tasks—such as mathematical derivation and code generation [29]—through post-training innovations during the inference phase (e.g., Long-CoT thinking [8]). This shift has led to the emergence of \"Large Reasoning Models\" (LRMs) [99] with advanced internal reasoning abilities.", + "bbox": [ + 81, + 715, + 482, + 868 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "These advancements have not only boosted basic model capabilities but also opened new avenues for application technologies like Retrieval-Augmented Generation (RAG) [21]. 
Serving as a key link between language models and external knowledge, RAG overcomes traditional LLMs' limits in knowledge freshness, domain specificity, and factual accuracy by retrieving real-time non-parametric information and integrating it into the context. This enhances information processing and reduces hallucination risks in knowledge-intensive tasks.", + "bbox": [ + 511, + 319, + 916, + 469 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Technological evolution is advancing RAG architectures through innovations like query rewriting [61], re-ranking [1], and hybrid retrieval [88], creating an Advanced RAG paradigm focused on pre-retrieval optimization and post-retrieval refinement. Modular RAG [22] further breaks down these systems into component-based, service-oriented architectures, using orchestration to tackle practical challenges.", + "bbox": [ + 511, + 470, + 915, + 577 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Despite improvements in query intent recognition and knowledge use, challenges of RAG remain in demanding tasks like deep research and complex decision-making. Key issues include: 1) difficulty capturing intent from ambiguous queries; 2) poor logical coherence in multi-hop reasoning; 3) efficiency limits of traditional retrieval in open domains; and 4) degraded generation quality from noisy retrieved data.", + "bbox": [ + 511, + 577, + 913, + 681 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Models like DeepSeek-R1, with strong reasoning capabilities, inspire new directions for RAG systems. As shown in Figure 1, recent research explores integrating formal reasoning frameworks with knowledge retrieval. This approach optimizes retrieval through logic-driven query reformulation and uses reasoning to analyze and validate retrieved knowledge, creating cognitive synergy between retrieval and generation. 
This paradigm aims to overcome conventional limitations, enabling intelligent systems with rigorous logic and reliable knowledge use. From a trend perspective, an increasing number of methods combine reasoning and retrieval abilities through reinforcement learning (RL), marking a new direction in the LRM era. Meanwhile, prompt-based approaches continue to rapidly evolve, with researchers aiming", + "bbox": [ + 511, + 681, + 915, + 893 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.15909v2 [cs.IR] 24 Apr 2025", + "bbox": [ + 22, + 272, + 60, + 707 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding Author", + "bbox": [ + 83, + 893, + 212, + 906 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9e84b6f53ff577c819991b081153340f022d99ed4e4207a9a3ce616a9fa9e815.jpg", + "image_caption": [ + "Figure 1. Timeline of studies on RAG-reasoning synergy. From a technical perspective, the approaches can be categorized into Prompt-Based, Tuning-Based, and RL-Based methods. A notable trend is the increasing use of Reinforcement Learning to enhance RAG systems, particularly following the prosperity of test-time scaling. Meanwhile, Prompt-Based and Tuning-Based methods continue to evolve in parallel, demonstrating that there are multiple pathways to integrating reasoning capabilities into RAG systems." + ], + "image_footnote": [], + "bbox": [ + 86, + 88, + 911, + 422 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "to achieve results through workflow design while keeping model parameters frozen. Notably, sole reliance on tuning methods is steadily decreasing, suggesting limited improvements from additional fine-tuning at this stage.", + "bbox": [ + 81, + 530, + 482, + 590 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Traditional RAG is limited by its unidirectional flow (retrieval $\\rightarrow$ generation). 
Integrating reasoning capabilities grants the system greater autonomy, unlocking new possibilities. As shown in Figure 2, this integration is poised to drive major breakthroughs, enabling practical use in complex real-world scenarios.", + "bbox": [ + 81, + 590, + 482, + 679 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1) From Ambiguous Semantic Matching to Logic-Driven Targeted Retrieval. Traditional RAG relies on semantic similarity for retrieval; however, it is sensitive to phrasing variations. Advanced reasoning allows deep logical analysis of queries (e.g., causal links, conditional constraints) to dynamically refine retrieval strategies [24]. For example, to answer \"How to reduce postoperative infection risks in diabetes patients?\", the system prioritizes retrieving \"blood glucose control thresholds\" and \"antibiotic usage guidelines\" over simply matching \"diabetes postoperative care\". This approach supports multi-hop retrieval by breaking down complex queries into sequential sub-queries while preserving cross-document coherence through reasoning chains.", + "bbox": [ + 81, + 681, + 482, + 878 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "2) From Simple Information Aggregation to Logically Coherent Context Construction. Current RAG systems input all retrieved document chunks into context directly, often causing fragmented or contradictory information that confuses LLMs. Reasoning-enhanced systems integrate evidence chains by logically verifying and inferring causality in retrieved content, filtering conflicts and forming coherent explanations [100]. They also use dynamic knowledge completion to detect missing logical links, prompting iterative retrieval or inference to fill gaps [51].", + "3) From Simple and Single-Turn QA to Systemic Decision Support. Traditional RAG performs well in factual QA [65] but struggles with multi-step and complex decision-making. 
Reasoning-integrated systems produce structured reasoning output, enhancing multi-objective optimization to balance retrieval breadth and solution feasibility under various constraints. For example, multiple constraints under different conditions in engineering construction plans [54], and the formulation of diagnosis and treatment plans for various diseases in the medical field [105].", + "4) From Indiscriminate Retrieval to Intelligent Resource Allocation. Traditional RAG retrieves documents for all queries, regardless of complexity. Reasoning-enhanced systems use on-demand retrieval, handling simple queries" + ], + "bbox": [ + 511, + 530, + 913, + 893 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 59, + 346, + 71 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Gao et al.", + "bbox": [ + 857, + 59, + 911, + 70 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/41e010600a7e8b306e4b7d692efd51a598046a1382a35dd7bb01fd4aa49ff2f2.jpg", + "image_caption": [ + "Figure 2. Advantages of Combining RAG with Reasoning" + ], + "image_footnote": [], + "bbox": [ + 84, + 88, + 915, + 364 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "with direct generation and complex ones with multi-round retrieval to reduce latency [20]. Dynamic retrieval pruning uses pre-reasoning predictions to target key information, minimizing unnecessary document and graph traversal [41].", + "bbox": [ + 81, + 412, + 482, + 473 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "5) From Passive Knowledge Tool to Proactive Cognitive Assistant. Advancing beyond reactive knowledge retrieval, reasoning-enhanced systems can proactively serve users by asking clarifying questions and anticipating implicit needs. 
This shift enables human-like assistants that integrate memory, reasoning, and decision-making, proving especially valuable for complex tasks such as deep research [43], business analytics [50], personal assistant [107] and urban planning [85].", + "bbox": [ + 81, + 473, + 482, + 609 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "However, the synergistic pathway between RAG and reasoning requires more than simply replacing conventional generative LLMs with LRM modules. It necessitates deep integration of technological evolution insights from LRM - achieved through reconstructing knowledge retrieval mechanisms and strengthening reasoning-generation collaborative linkages - to enable system-level enhancement of cognitive capabilities within the RAG architecture.", + "bbox": [ + 81, + 609, + 482, + 728 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Therefore, this paper aims to address the pivotal and forward-looking research question of \"how RAG systems can synergize with reasoning capabilities\". We systematically review current studies after 2024 while establishing explicit definitions for reasoning within RAG contexts. Building on this foundation, we provide an in-depth taxonomy and analysis of the objectives, typical patterns, and implementations underlying RAG-reasoning integration, clarifying key technological trajectories and critical breakthroughs.", + "bbox": [ + 81, + 729, + 482, + 864 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As RAG technology enters its next developmental phase, downstream task complexity has escalated significantly -", + "bbox": [ + 83, + 864, + 482, + 896 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "particularly evident in emerging challenges like Deep Research [106]. These advanced applications not only demand enhanced reasoning capacities but also drive RAG's expansion into multimodal, cross-domain, and dynamic environments. 
However, while the integration of reasoning capabilities demonstrably improves complex task performance, existing research frequently overlooks associated computational overheads and potential risks. Through systematic examination of these operational constraints and analysis of industry applications, we propose practical guidelines for multiple real-world scenarios with diverse requirements.", + "bbox": [ + 511, + 412, + 915, + 578 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Finally, we outline future research directions grounded in current technological evolution, including: 1) RAG-graph architecture integration, 2) coordinated multimodal reasoning frameworks, 3) hybrid model collaboration, and 4) RL optimization specifically designed for RAG systems. This work establishes both theoretical foundations and practical roadmaps for subsequent research in this evolving field.", + "bbox": [ + 511, + 579, + 913, + 684 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The contributions of this paper can be summarized as follows:", + "bbox": [ + 513, + 685, + 911, + 712 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Pioneering Review. This work represents the first comprehensive survey focusing on the integration of RAG with reasoning, offering novel insights and forward-looking guidance for advancing this emerging research frontier.", + "- Systematic Taxonomy. We present a multi-dimensional framework to systematically examine the objectives, paradigms, and methodologies for combining RAG with reasoning capabilities, establishing clear classification criteria across technical dimensions.", + "- Practical Guidance. 
Beyond theoretical exploration, we critically discuss the additional cost and potential" + ], + "bbox": [ + 540, + 724, + 926, + 906 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Synergizing RAG and Reasoning: A Systematic Review", + "bbox": [ + 84, + 59, + 380, + 71 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 651, + 59, + 913, + 71 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "risks associated with the introduction of reasoning, accompanied by an actionable Practical Guide for real-world scenarios.", + "bbox": [ + 122, + 90, + 482, + 133 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "- Open Resource Platform1 Through the OpenRAG platform, we provide a rich, multi-dimensional review of related work, which allows readers to quickly search and compare different methods.", + "bbox": [ + 109, + 136, + 482, + 196 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2 Overview", + "text_level": 1, + "bbox": [ + 83, + 210, + 202, + 226 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This chapter establishes a conceptual framework for the paper along two key dimensions. First, it formally defines \"reasoning\" and distinguishes it from \"inference.\" Second, it organizes a taxonomy of synergy mechanisms between \"RAG and Reasoning.\" To construct a clear cognitive pathway, we address three progressive research questions:", + "bbox": [ + 81, + 231, + 482, + 323 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Why synergize RAG and reasoning?", + "- What are their typical collaboration paradigms?", + "- How can this integration be realized?" 
+ ], + "bbox": [ + 109, + 327, + 480, + 372 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.1 Definition", + "text_level": 1, + "bbox": [ + 83, + 386, + 202, + 398 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The definition of reasoning in modern AI systems remains an evolving construct, particularly within the context of LRMs exemplified by DeepSeek R1 and OpenAI O1. Here, under the scope of LLMs, we formalize reasoning as a structured, multi-step process that dynamically decomposes complex problems, generates intermediate hypotheses, and iteratively refines solutions through logical and evidence-based transformations. Mathematically, let a reasoning process $\\mathcal{R}$ be defined as a tuple $\\langle \\mathcal{K}_p, \\mathcal{K}_r, S_t, \\Phi \\rangle$ , where $\\mathcal{K}_p$ denotes parametric knowledge embeddings, $\\mathcal{K}_r$ represents retrieved contextual knowledge, $S_t = \\{s_0, s_1, \\ldots, s_n\\}$ constitutes the evolving state sequence with $s_0$ as the initial query and $s_n$ as the final response, and $\\Phi : S_i \\times \\mathcal{K}_p \\times \\mathcal{K}_r \\to S_{i+1}$ defines the state transition function.", + "bbox": [ + 81, + 405, + 491, + 614 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The reasoning process exhibits three defining characteristics. First, it is inherently multi-step, systematically decomposing complex problems into intermediate cognitive states (e.g., sub-question generation or temporary conclusions) rather than pursuing direct input-output mapping. Second, it generates novel knowledge or facts – synthesizing implicit relationships, deriving latent constraints, or reformulating problems in ways not explicitly present in the initial input or parametric memory (e.g., transforming \"Is A greater than B?\" into comparative subquestions about A and B's attributes). Crucially, these representations are not merely retrieved but dynamically constructed through the reasoning trajectory. 
Third, the process is teleological – its architecture and termination conditions are explicitly optimized for complex problem resolution, where complexity is measured by the necessity of state transitions or the insufficiency of direct retrieval from either parametric $(\mathcal{K}_p)$", + "bbox": [ + 81, + 616, + 482, + 875 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "or external $(\mathcal{K}_r)$ knowledge sources. This stands in stark contrast to atomic inference, which lacks such deliberate state construction and goal-aware iteration.", + "bbox": [ + 511, + 90, + 913, + 136 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The distinction between reasoning and inference manifests most saliently in their computational signatures. While inference $\mathcal{I}$ constitutes a single-step conditional probability computation $P(y|x) = \prod_{t=1}^{T} P(y_t|x, y_{<t})$ [...] prompt-based methods that leverage special tokens (e.g., <think>, </think>) to steer model behavior, tuning-based methods that inject domain-specific knowledge or distill reasoning capability, and RL-based frameworks that optimize retrieval-reasoning policies through outcome reward models (ORM) or process reward models (PRM). 
The alignment between these", + "bbox": [ + 511, + 690, + 916, + 902 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Synergizing RAG and Reasoning: A Systematic Review", + "bbox": [ + 84, + 59, + 380, + 71 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 651, + 59, + 913, + 71 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "methodologies and the proposed taxonomy is critical—static workflows predominantly rely on predictable prompt-guided reasoning chains, whereas dynamic systems increasingly integrate search-based exploration or solver-augmented strategies to navigate evolving state spaces.", + "bbox": [ + 83, + 90, + 482, + 167 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Overall, this tripartite taxonomy—motivational drivers, architectural paradigms, and implementation methodologies—establishes a unified lens for analyzing RAG+Reasoning systems. Subsequent chapters will elaborate on each stratum, progressively revealing how these conceptual distinctions translate into technical innovations that push the boundaries of machine intelligence.", + "bbox": [ + 81, + 167, + 482, + 273 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3 The purpose of the synergy", + "text_level": 1, + "bbox": [ + 84, + 306, + 359, + 324 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The integration of RAG and reasoning marks a crucial advancement in enhancing LLMs' problem-solving abilities. Their true potential lies not in isolated use but in their synergy, which overcomes key limitations in retrieval and reasoning. This section explains the main motivations for combining RAG with reasoning, emphasizing two primary benefits: (1) enhancing retrieval accuracy and flexibility through reasoning, and (2) reinforcing complex reasoning by using context-rich retrieved knowledge. 
Figure 4 illustrates these collaborative aims and the limitations they address.", + "bbox": [ + 81, + 325, + 482, + 476 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The first key benefit is Reasoning-Augmented Retrieval where reasoning improves the retrieval process. Traditional RAG systems struggle with query formulation, relevance assessment, and iterative refinement—tasks needing logical and contextual analysis. Reasoning enables adaptive retrieval through dynamic query expansion, ambiguity resolution, and multi-hop evidence aggregation, overcoming the limits of keyword- or embedding-based methods and aligning retrieval with the task's reasoning demands.", + "bbox": [ + 81, + 477, + 482, + 613 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The second benefit is Retrieval-Augmented Reasoning, where external knowledge supplements the limitations of purely parametric LLM reasoning. Even advanced models face hallucination, knowledge gaps, and compositional challenges alone. Retrieval grounds reasoning in up-to-date, domain-specific, or rare information absent from model weights, crucial for explainability, multi-step deduction, and integrating diverse sources.", + "bbox": [ + 81, + 613, + 496, + 733 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Together, combining RAG and reasoning fills fundamental gaps in both techniques. By enhancing retrieval via reasoning and strengthening reasoning through retrieval, it broadens LLMs' capacity to address complex real-world problems.", + "bbox": [ + 81, + 734, + 482, + 794 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1 Reasoning-Augmented Retrieval", + "text_level": 1, + "bbox": [ + 84, + 828, + 364, + 842 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Reasoning-Augmented Retrieval (RAR) represents a significant advancement in information retrieval by integrating multi-step reasoning to dynamically enhance retrieval quality. 
Unlike traditional methods that depend on static semantic", + "bbox": [ + 84, + 845, + 480, + 906 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "matching, RAR creates a cognitive feedback loop mimicking human iterative reasoning, surpassing the limitations of simple \"query-document\" interactions.", + "bbox": [ + 517, + 90, + 911, + 136 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "RAR's effectiveness stems from several key features. It often uses on-demand retrieval, where reasoning-evaluating intent clarity, knowledge state, and temporal factors-guides adaptive search initiation, reducing redundancies present in fixed triggers (e.g., UAR's classifier [14]). It improves semantic alignment by inferring implicit query logic such as business rules or entity relationships to generate precise retrieval requests aligned with data schemas (e.g., PlanRAG's plan-retrieval loops [48]). RAR also applies multi-step iterative refinement, using intermediate reasoning outputs (e.g., chain-of-thought, partial answers [78]) to recursively reformulate queries in a closed-loop system essential for resolving multi-hop dependencies [68]. Furthermore, it adapts to specific domains by tailoring retrieval to vertical contexts (e.g., financial or medical) and balances efficiency and precision through lightweight reasoning strategies (e.g., AdaptiveRAG's complexity-based selection [41]).", + "bbox": [ + 511, + 136, + 913, + 393 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Traditional retrieval systems, effective for simple queries, struggle with complex information needs due to rigid designs favoring static matching over dynamic reasoning, limiting their adaptability to changing contexts and diverse data. RAR primarily addresses five core challenges inherent in these conventional methods.", + "bbox": [ + 517, + 393, + 911, + 482 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1.1 Semantic Disparities Between Queries and Documents. 
A key challenge lies in the mismatch between user queries and documents—whether due to differing expression styles (professional jargon vs. casual language) or implicit contextual gaps—making direct semantic matching unreliable. Importantly, high similarity does not guarantee true relevance, as documents may share keywords or surface features without addressing the underlying intent or logic of the query. Retrieval models must therefore understand deeper semantics beyond superficial similarity. Domain adaptation further complicates this issue. To overcome these gaps, approaches such as reasoning-augmented embeddings (O1-Embedder [101] enriches queries with inferred \"thinking\" text), feedback-driven rewriting (SmartRAG [20] dynamically refines queries based on retrieved results), and preplanning (PlanRAG [48] extracts business rules to generate SQL queries aligned with database schemas) help better capture domain-specific semantics and ensure relevance beyond mere similarity.", + "bbox": [ + 511, + 491, + 913, + 777 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1.2 Inflexible Intent Disambiguation. Traditional RAG methods rely on fixed embedding similarity strategies, which fail to dynamically interpret the implicit intent behind complex queries (e.g., multi-hop reasoning or domain-specific requirements). 
User queries often exhibit semantic complexity that far exceeds their surface text—for instance, a request to \"optimize supply chain costs\" may require correlating disparate database fields not explicitly", + "bbox": [ + 517, + 785, + 911, + 906 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 59, + 346, + 71 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Gao et al.", + "bbox": [ + 857, + 59, + 911, + 70 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/09571464f1bd88bbbe376dc373e70dd2c58a83763d1dbb94f0dcc2d042a01304.jpg", + "image_caption": [ + "Figure 4. The purpose of the synergy between RAG and reasoning" + ], + "image_footnote": [], + "bbox": [ + 98, + 104, + 129, + 138 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Core Limitations in RAG", + "text_level": 1, + "bbox": [ + 130, + 113, + 250, + 138 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Semantic Disparities", + "text_level": 1, + "bbox": [ + 313, + 116, + 452, + 128 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Lexical and contextual disparities", + "bbox": [ + 300, + 133, + 437, + 143 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "(e.g., terminology mismatch, implicit", + "bbox": [ + 300, + 143, + 467, + 152 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "context absence)", + "bbox": [ + 300, + 152, + 375, + 161 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Failure of semantic similarity matching", + "bbox": [ + 303, + 164, + 464, + 172 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Knowledge Gaps", + "text_level": 1, + "bbox": [ + 545, + 116, + 658, + 128 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Long-range reasoning tasks (e.g., multi-", + "bbox": [ + 521, + 132, + 683, + 138 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "hop QA)", + "bbox": [ + 521, + 140, + 555, + 148 + ], 
+ "page_idx": 6 + }, + { + "type": "text", + "text": "Requiring logical integration across", + "bbox": [ + 519, + 152, + 684, + 159 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "multiple knowledge segments", + "bbox": [ + 519, + 160, + 643, + 167 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Absence of intermediate knowledge", + "bbox": [ + 519, + 170, + 684, + 176 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "leads to reasoning chain fragmentation", + "bbox": [ + 519, + 178, + 684, + 185 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Core Limitations", + "text_level": 1, + "bbox": [ + 745, + 114, + 864, + 126 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "in Reasoning", + "text_level": 1, + "bbox": [ + 756, + 128, + 851, + 141 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/134e9289b64c48e173041598397d78a2ffff93d776484a12645311f697b63f1f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 864, + 109, + 895, + 140 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Inflexible Intent Disambiguation", + "text_level": 1, + "bbox": [ + 183, + 186, + 287, + 208 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Failure to resolve implicit intents in", + "bbox": [ + 153, + 214, + 318, + 220 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "complex queries", + "bbox": [ + 153, + 222, + 223, + 229 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "(e.g., multi-hop reasoning, domain-", + "bbox": [ + 153, + 232, + 316, + 239 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "specific requirements)", + "bbox": [ + 153, + 239, + 246, + 247 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The semantic complexity of user queries", + "bbox": [ + 153, + 250, + 316, + 257 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "may far exceed their surface text", + "bbox": [ + 153, + 258, + 285, + 265 + ], + "page_idx": 6 + }, + { 
+ "type": "text", + "text": "Heterogeneous Data Collaboration", + "text_level": 1, + "bbox": [ + 102, + 282, + 334, + 292 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Schema-disparate data sources", + "bbox": [ + 125, + 301, + 258, + 308 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "(e.g., structured records vs. unstructured passages)", + "bbox": [ + 125, + 311, + 313, + 327 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Requires cross-modal retrieval and alignment", + "bbox": [ + 125, + 330, + 313, + 338 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Efficiency vs. Precision", + "text_level": 1, + "bbox": [ + 176, + 354, + 331, + 366 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Comprehensive Retrieval $\\rightarrow$ Overhead", + "bbox": [ + 171, + 372, + 331, + 380 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Restricted Retrieval $\\rightarrow$ Critical info loss", + "bbox": [ + 171, + 383, + 331, + 392 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Iterations $\\uparrow \\rightarrow$ Computational costs", + "bbox": [ + 171, + 397, + 320, + 405 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Lack of dynamic trade-off mechanism", + "bbox": [ + 171, + 409, + 328, + 417 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Reasoning Augmented Retrieval", + "text_level": 1, + "bbox": [ + 341, + 204, + 596, + 244 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "RAG", + "text_level": 1, + "bbox": [ + 473, + 271, + 524, + 287 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Reasoning", + "text_level": 1, + "bbox": [ + 437, + 325, + 552, + 344 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Retrieval", + "text_level": 1, + "bbox": [ + 495, + 359, + 591, + 376 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Augmented Reasoning", + "text_level": 1, + "bbox": [ + 421, + 380, + 661, + 398 + ], + "page_idx": 6 + }, + { + "type": 
"text", + "text": "Search Space Explosion & Local Optima Traps", + "text_level": 1, + "bbox": [ + 485, + 431, + 645, + 455 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Search space grows exponentially with reasoning steps", + "bbox": [ + 483, + 460, + 647, + 474 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Traditional multi-step reasoning methods lack external knowledge constraints", + "bbox": [ + 483, + 478, + 647, + 493 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Lead to invalid hypotheses, local optima", + "bbox": [ + 483, + 497, + 647, + 503 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "traps, or logical inconsistencies", + "bbox": [ + 483, + 505, + 612, + 511 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Domain Knowledge Boudary", + "text_level": 1, + "bbox": [ + 668, + 196, + 852, + 208 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Pre-trained models exhibit constrained knowledge coverage", + "bbox": [ + 676, + 214, + 841, + 231 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Struggle with tasks requiring domain-specific expertise", + "bbox": [ + 676, + 234, + 839, + 250 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "(e.g., semiconductor design)", + "bbox": [ + 676, + 251, + 795, + 258 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Processing tasks requiring real-time information is challenging", + "bbox": [ + 678, + 262, + 841, + 277 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Dynamic Knowledge Requirements", + "text_level": 1, + "bbox": [ + 725, + 289, + 857, + 311 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Progressively evolving knowledge requirements", + "bbox": [ + 694, + 316, + 888, + 325 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Initial retrieval results are irrelevant or redundant to subsequent reasoning steps", + "bbox": [ + 697, + 330, + 887, + 344 + ], + "page_idx": 6 + }, + { + "type": 
"text", + "text": "Dynamically evolving information needs in complex reasoning tasks", + "bbox": [ + 697, + 347, + 887, + 363 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Fixed retrieval strategies struggle to achieve real-time matching", + "bbox": [ + 697, + 366, + 885, + 381 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Insufficient Depth & Breadth", + "text_level": 1, + "bbox": [ + 669, + 396, + 857, + 407 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The inherent static knowledge of LLMs", + "bbox": [ + 681, + 415, + 846, + 422 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Challenge of covering dynamically evolving domain knowledge boundaries", + "bbox": [ + 681, + 426, + 846, + 441 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The reasoning chains frequently terminate at superficial associations", + "bbox": [ + 681, + 446, + 846, + 462 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The inability to establish cross-domain, multi-level knowledge connections", + "bbox": [ + 681, + 464, + 844, + 479 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "mentioned. Static retrieval methods lack the adaptability to capture such dynamically evolving information needs. A critical limitation lies in intent dynamicity: as contextual understanding expands, traditional systems generate fixed retrieval results based solely on the initial query. Furthermore, semantic representation limitations of dense retrieval models (e.g., BERT-based models) hinder their ability to encode intricate semantic relationships (e.g., irony, metaphors), leading to misaligned results. 
Current approaches attempt to mitigate these issues through multi-step intent decomposition (e.g., LevelRAG's high-level searcher breaks complex queries into multi-hop sub-queries [103]) and dynamic query reformulation (e.g., LeReT's reinforcement learning generates diversified query candidates [34]), iteratively refining retrieval strategies to align with document content.", + "bbox": [ + 81, + 585, + 483, + 813 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.1.3 Inefficient Coordination of Multi-Source Heterogeneous Data. Retrieval from diverse sources—text, tables, graphs, web, and APIs—often produces fragmented results due to a lack of global reasoning. The key challenge is modal heterogeneity: different retrieval techniques (dense", + "bbox": [ + 81, + 830, + 483, + 906 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "retrieval for text, SQL for tables, GQL for graphs) operate independently without unified coordination. For example, experiments show standard RAG methods (like dense retrieval with query decomposition) yield only $32.7\\%$ perfect recall and $40.9\\%$ F1 on the OTT-QA dataset. These outcomes reveal the limitations of traditional approaches in aligning textual queries with structured tables—such as failing to link concepts like \"K-12 student free rates\" in text to related \"education expenditure\" columns when not explicitly mentioned. Additionally, disconnected entity matching (e.g., relating \"company revenue\" in text to financial tables) worsens inefficiencies, as conventional methods depend on semantic similarity and overlook domain-specific relationships and exact-value matches. 
Advanced techniques—such as reasoning-driven alignment (ARM's N-gram constraints for cross-modal entity decoding [7]) and unified semantic spaces (LevelRAG's shared multi-modal representations [103])—enable more effective, integrated retrieval.", + "bbox": [ + 511, + 585, + 929, + 859 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.1.4 Incompleteness and Incoherence in Complex Retrieval Tasks. Single-step retrieval systems fall short in", + "bbox": [ + 513, + 876, + 913, + 906 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Synergizing RAG and Reasoning: A Systematic Review", + "bbox": [ + 84, + 59, + 380, + 71 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 651, + 59, + 913, + 71 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "complex multi-hop reasoning tasks, such as deducing entity chains or conducting decision analysis. Traditional static retrieval conflicts with multi-step cognitive needs, resulting in three main issues: 1) Path dependency, where later retrievals rely on information from earlier steps (e.g., finding \"the most populous county in California\" before its education policies), but conventional systems lack state management; 2) Error propagation, early retrieval errors cause mistakes in intermediate results, which then affect the next round of retrieval; 3) Semantic inflexibility of fixed queries, which cannot adapt to dynamic concepts like entity aliases or relational predicates.", + "bbox": [ + 81, + 90, + 482, + 257 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Advanced methods address these flaws through integrated strategies. PlanRAG uses iterative \"plan-retrospect-replan\" cycles to trigger sub-queries when gaps arise. Reinforcement learning in LeReT improves query generation via reward-driven path selection. 
Likewise, ITER-RETGEN rebuilds follow-up queries using intermediate answers (e.g., \"award recipient's height\") to resolve multi-hop dependencies.", + "bbox": [ + 81, + 257, + 491, + 364 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.1.5 Trade-offs Between Retrieval Efficiency and Precision. Complex scenarios face a tension between exhaustive retrieval, which is computationally costly, and restricted retrieval, which risks information loss. Expanding retrieval blindly inflates costs (e.g., LLM API calls) without ensuring relevance. Simple queries suffer from unnecessary multi-step retrieval, wasting resources, while complex queries face quality risks if retrieval is too limited. Adaptive approaches like complexity-aware routing (Adaptive-RAG's lightweight classifier allocates retrieval budgets [41]) and cost-sensitive training (SmartRAG's reinforcement learning balances quality and steps [20]) dynamically manage this trade-off.", + "bbox": [ + 81, + 382, + 482, + 561 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In summary, Reasoning-Augmented Retrieval overcomes traditional RAG's limitations in dynamic triggering, semantic alignment, multi-hop support, domain adaptation, and efficiency trade-offs by deeply integrating reasoning into the retrieval process. Its key innovation is a bidirectional enhancement between reasoning and retrieval—reasoning refines retrieval strategies, while retrieval supports iterative reasoning—jointly boosting accuracy and efficiency in complex information tasks.", + "bbox": [ + 81, + 563, + 482, + 699 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.2 Retrieval-Augmented Reasoning", + "text_level": 1, + "bbox": [ + 83, + 720, + 366, + 736 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Retrieval-Augmented Reasoning (ReAR) combines external knowledge retrieval with inherent model reasoning to overcome failures from knowledge gaps or logical discontinuities in complex tasks. 
Unlike traditional RAG methods that retrieve information once, ReAR uses an iterative, context-sensitive retrieval that continuously provides relevant data to support multi-step reasoning. This approach is crucial for tasks needing strict logic, such as mathematical proofs, where intermediate steps require specific theorems or lemmas. By making retrieval an adaptive, ongoing process rather than a one-time step, ReAR strengthens each reasoning stage", + "bbox": [ + 81, + 739, + 482, + 906 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "with accurate, current information, improving the overall inference's reliability and robustness.", + "bbox": [ + 513, + 90, + 911, + 119 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ReAR's core feature is dynamic knowledge supplementation, generating retrieval queries in real-time based on the evolving reasoning context. This overcomes the limits of single-round retrieval by enabling knowledge refinement at each step, as seen in process supervision frameworks like RAG-Gym [96]. ReAR also improves reasoning paths using methods like search space compression—for example, MCTS-guided heuristics in KBQA—and structured feedback from diverse sources like knowledge graphs [97]. These techniques maintain logical consistency while reducing irrelevant or conflicting information. Importantly, ReAR adapts well across domains, supporting precise knowledge retrieval and tool use for specialized tasks such as industrial problem-solving in PIKE [82] or scientific reasoning [106].", + "bbox": [ + 511, + 121, + 913, + 332 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "By integrating retrieval as an active part of the reasoning loop, ReAR addresses LLMs' temporal and depth constraints, ensuring adherence to domain-specific and time-sensitive requirements. 
This close coupling turns external knowledge into an on-demand resource, creating a closed-loop system that enhances the model's ability to handle complex, knowledge-intensive problems. Specifically, ReAR seeks to address the following limitations and challenges:", + "bbox": [ + 511, + 333, + 913, + 454 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.2.1 Knowledge Gap in Multi-step Reasoning. In long-range reasoning, missing intermediate knowledge often breaks logical chains, especially in industrial and scientific contexts requiring multi-source data integration (e.g., text, tables, time-series). Static retrieval methods worsen this by not adapting to the reasoning process's changing needs. ReAR techniques address this with chained retrieval, as in CoRAG [83], which breaks multi-hop questions into sequential sub-queries (e.g., retrieving \"event causes\" then their \"impacts\"), systematically linking knowledge. Reasoning-state-aware retrieval, used in FLARE [45], predicts future information needs by generating interim prompts (e.g., \"the next step requires discussion of ...\"), enabling dynamic query construction that preserves coherence. Together, these approaches resolve the conflict between discrete retrieval and continuous reasoning.", + "bbox": [ + 511, + 460, + 919, + 702 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.2.2 Reasoning Discontinuity Caused by Domain Knowledge Boundaries. Reasoning discontinuity arises from LLMs' limited knowledge, struggling with specialized domains (e.g., semiconductor design in PIKE [82]) and real-time data (e.g., medical parameters in Agentic Reasoning [92]). 
End-to-end models often produce factual errors, while traditional RAG methods fail to retrieve deep professional knowledge due to coarse retrieval, especially with complex data like tables, charts and images.", + "bbox": [ + 511, + 710, + 913, + 844 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ReAR addresses this with two complementary solutions: knowledge atomization and structural organization, as in PIKE's decomposition of documents into fine-grained units and multi-layer knowledge graphs for semantic and logical", + "bbox": [ + 511, + 845, + 913, + 906 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 59, + 346, + 71 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Gao et al.", + "bbox": [ + 857, + 59, + 911, + 70 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "retrieval; and dynamic tool integration, as in Agentic Reasoning's real-time data acquisition via code execution and API calls to compute critical indicators (e.g., medical FiO2). These innovations overcome the challenges of specialized knowledge depth and timely information relevance that limit conventional methods.", + "bbox": [ + 81, + 90, + 480, + 181 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "3.2.3 Search Space Explosion and Local Optima Traps.", + "text_level": 1, + "bbox": [ + 81, + 196, + 482, + 210 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The main challenge in multi-step reasoning is the exponential growth of the search space, where methods like Chain-of-Thought (CoT) often yield suboptimal or inconsistent results due to unconstrained hypotheses. Traditional approaches like CoT and Tree-of-Thought (ToT) lack external knowledge constraints, causing invalid assumptions, while purely symbolic reasoning falls short in open-domain tasks. 
To address this, two strategies are used: knowledge base-anchored heuristic search (KBQA-O1 [58]), which limits reasoning actions to subgraphs in knowledge graphs, and a retrieval-verification mechanism (Search-o1 [51]) that prunes unsupported reasoning paths using evidence from the knowledge base. Together, these reduce the search space and preserve reasoning coherence.", + "bbox": [ + 81, + 212, + 482, + 422 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "3.2.4 Dynamic Knowledge Requirements in Multi-Step Reasoning. Complex multi-step reasoning tasks face the challenge of continuously changing knowledge requirements. This is evident in cases like multi-hop reasoning and engineering planning, where each stage generates new sub-problems (e.g., moving from \"architectural design\" to \"material cost estimation\"). Static knowledge bases or one-time retrieval methods cannot meet this evolving demand. This manifests in two ways: initial knowledge may miss later needs, causing gaps; and fixed knowledge sets may include irrelevant information, reducing reasoning accuracy. To address this, new retrieval-augmented reasoning approaches introduce dynamic solutions: process supervision (e.g., reward models in RAG-Gym [96]) detects knowledge gaps in real time, atomic decision-making (e.g., step decomposition in DeepRAG [24]) triggers retrieval as needed, and tree-like expansions (e.g., multi-path retrieval in DeepSolution [54]) enable parallel exploration. 
By integrating knowledge retrieval within reasoning, these methods let the system identify, supplement, and verify knowledge dynamically—much like a human expert—greatly enhancing the reliability and completeness of complex reasoning.", + "bbox": [ + 81, + 438, + 482, + 771 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "3.2.5 Insufficient Depth and Breadth of Reasoning.", + "text_level": 1, + "bbox": [ + 81, + 785, + 482, + 800 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This issue is prominent in expert tasks like medical diagnosis, legal analysis, and research report generation. LLMs' static knowledge often fails to capture the evolving scope of domain knowledge, resulting in shallow reasoning that misses multi-level, cross-domain connections. For example, when assessing \"Company A is affected by economic recession,\" traditional methods rely on superficial statistical", + "bbox": [ + 81, + 801, + 482, + 906 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "patterns and cannot systematically follow the deeper logical chain from \"Company A $\\rightarrow$ industry supply chain $\\rightarrow$ macroeconomic policy $\\rightarrow$ international political landscape,\" leading to reasoning that lacks causal depth.", + "bbox": [ + 511, + 90, + 913, + 151 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To overcome this, recent advances use structured, retrieval-enhanced frameworks. ToG2.0 [60] models Knowledge Graph relational paths as retrieval guidance vectors, enabling targeted queries along entity paths, surpassing the limits of keyword-based retrieval. This approach complements CR-Planner's [52] iterative expansion, which triggers retrieval of specialized knowledge (e.g., textbook proofs of algorithm complexity) at critical reasoning points, ensuring accurate domain knowledge integration via multi-round validation. 
Addressing cross-domain knowledge linkage, CO-STORM [43] employs a multi-agent system whose host module generates cross-modal retrieval commands by analyzing potential semantics in uncited documents.", + "bbox": [ + 511, + 152, + 913, + 347 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4 Patterns of synergy", + "text_level": 1, + "bbox": [ + 513, + 367, + 722, + 385 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Section 3 detailed the need and motivation for integrating RAG with reasoning. Building on this, this section presents two core implementation patterns for RAG-reasoning synergy (Figure 5): (1) the Pre-defined Workflow, which uses logical architectures with preset rules for coordination, and (2) Dynamic Workflow, which relies on context-aware, adaptive coordination via real-time decision engines. These patterns illustrate current frameworks combining knowledge retrieval and multi-step reasoning from deterministic and flexible perspectives.", + "bbox": [ + 511, + 387, + 913, + 539 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.1 Pre-defined workflow", + "text_level": 1, + "bbox": [ + 513, + 556, + 718, + 569 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Pre-defined workflow is a multi-step reasoning approach with a fixed architecture and sequential execution, emphasizing process clarity and operational determinism. It consists of predefined iterative stages, each with strict input-output rules and no dynamic changes based on intermediate results. This modular design ensures controllability and structured reasoning for complex tasks. All steps are executed regardless of intermediate outcomes, guaranteeing repeatability and stability while avoiding uncertainties from dynamic decisions. 
Although it sacrifices adaptability, this approach offers procedural predictability and is well-suited for scenarios demanding clear reasoning paths, albeit with possible computational redundancy due to lack of real-time adjustments.", + "bbox": [ + 511, + 575, + 913, + 785 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Mathematically, the pre-defined RAG workflow can be formalized as a deterministic multi-step operational chain. Given an input query $Q$ and a predefined sequence of $N$ reasoning steps and the final decision output $D$ , the complete workflow is expressed as:", + "bbox": [ + 511, + 786, + 913, + 862 + ], + "page_idx": 8 + }, + { + "type": "equation", + "text": "\n$$\nD = f _ {N} \\circ \\dots \\circ f _ {2} \\circ f _ {1} (Q) \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 629, + 891, + 913, + 907 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Synergizing RAG and Reasoning: A Systematic Review", + "bbox": [ + 84, + 59, + 380, + 71 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 651, + 59, + 911, + 71 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/044160498b6a6dec3d4b731753eb83312cd53fabf064ad53ce6793173d12947b.jpg", + "image_caption": [ + "Figure 5. Patterns of Synergy between RAG and Reasoning" + ], + "image_footnote": [], + "bbox": [ + 84, + 88, + 911, + 333 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "where each $f_{i}\\in \\{\\Psi ,R,\\Gamma \\}$ denotes strictly defined functions for reasoning $(\\Psi)$ , retrieval $(R)$ , or decision-making $(\\Gamma)$ , with $\\circ$ representing function composition. This formulation adheres to the fixed mapping sequence $Q\\mapsto \\Psi (Q)\\mapsto R(\\Psi (Q))\\mapsto \\Gamma (R(\\Psi (Q)))$ , exhibiting Markovian properties where $f_{t + 1}$ depends solely on $f_{t}$ 's output while remaining independent of historical states $\\{f_{< t}\\}$ . 
The chained composition guarantees process closure and reproducibility, though constrained by the static combinatorial nature of $\\{f_i\\}_{i = 1}^N$ .", + "bbox": [ + 81, + 382, + 482, + 518 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In the pre-defined pipeline, based on the position where reasoning is introduced, it can be further divided into Pre-Retrieval, Post-Retrieval, and Hybrid.", + "bbox": [ + 81, + 518, + 482, + 563 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.1.1 Pre-Retrieval Reasoning. For pre-retrieval methods, the sequence is explicitly defined as", + "bbox": [ + 81, + 571, + 482, + 602 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\nD = \\Gamma \\circ \\mathcal {R} \\circ \\Psi (Q) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 218, + 611, + 480, + 626 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "where $\\Psi$ denotes a reasoning operator that systematically transforms or enriches the query prior to retrieval. This paradigm enhances retrieval precision by resolving ambiguities, inferring implicit intents, or optimizing query representations. Current research identifies four principal methodological categories for designing $\\Psi$ :", + "bbox": [ + 81, + 633, + 482, + 724 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Query Optimization focuses on generating and selecting query variants to maximize retrieval relevance. Mathematically, this is formalized as Candidates $=$ Generate(Q,C), $\\Psi_{\\mathrm{Optimize}}(Q,C) = \\arg \\max_{\\mathrm{candidate} \\in \\mathrm{Candidates}}$ Score(candidate), where (Generate) produces candidate queries and (arg max) selects optimal variants based on contrastive training or reinforcement learning. 
Representative implementations, such as LeReT [34], leverage iterative sampling and optimization to balance query diversity and specificity.", + "bbox": [ + 81, + 724, + 482, + 861 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Attribute Judgment employs classification mechanisms to dynamically regulate retrieval triggers. This is modeled as $\\Psi_{\\mathrm{Classify}}(Q) = \\mathrm{Classify}(Q)$ , where Classify evaluates query", + "bbox": [ + 83, + 861, + 482, + 907 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "attributes (e.g., temporal sensitivity, intent complexity) against predefined criteria. Frameworks like UAR [14] and AdaptiveRAG [41] exemplify this approach by integrating multistage classifiers to minimize unnecessary retrievals.", + "bbox": [ + 511, + 382, + 918, + 441 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Plan Generation decomposes complex queries into structured sub-task sequences to guide retrieval direction. Formulated as $\\Psi_{\\mathrm{Plan}}(Q) = \\mathrm{Plan}(Q)$ , the operator Plan generates hierarchical task decompositions, as seen in PlanRAG [48], which utilizes chain-of-thought reasoning to align retrieval targets with multi-step problem-solving requirements.", + "bbox": [ + 511, + 443, + 915, + 532 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Semantic Enhancement enriches query representations using domain-specific or task-aware embeddings. 
Expressed as $\\Psi_{\\text{Enhance}}(Q) = \\text{Encode}(Q, \\mathcal{K})$ , where $\\mathcal{K}$ denotes auxiliary knowledge (e.g., reasoning trajectories), methods like O1-Embedder [101] integrate latent reasoning patterns into query embeddings to improve retrieval robustness.", + "bbox": [ + 511, + 534, + 913, + 623 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Collectively, these methodologies demonstrate that pre-retrieval reasoning serves as a systematic interface to mitigate semantic gaps between raw queries and knowledge bases, establishing a critical component for precision-driven RAG architectures.", + "bbox": [ + 511, + 625, + 913, + 698 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.1.2 Post-Retrieval Reasoning. In pre-defined RAG systems with multi-step reasoning pipelines, the post-retrieval reasoning paradigm represents a critical advancement where cognitive processing occurs after information retrieval from external sources. This approach addresses inherent limitations in conventional RAG, particularly in managing knowledge conflicts, mitigating information insufficiency, and enhancing logical consistency across complex reasoning tasks. 
Mathematically, this process can be formalized as a deterministic function composition:", + "bbox": [ + 511, + 712, + 915, + 864 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\nD = \\Gamma \\circ \\Psi \\circ \\mathcal {R} (Q) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 651, + 891, + 911, + 906 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 59, + 346, + 71 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Gao et al.", + "bbox": [ + 857, + 59, + 911, + 71 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "$\\mathcal{R}$ denotes the retrieval operator, $\\Psi$ implements the reasoning transformation, and $\\Gamma$ represents the final decision function.", + "bbox": [ + 83, + 90, + 480, + 119 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The core characteristic of Post-Retrieval Reasoning lies in its execution of the reasoning process after retrieval, with the reasoning target being the retrieved content. ToG2.0 [60] proposes an iterative multi-step reasoning framework that alternates between graph retrieval and context retrieval, integrating the reasoning judgment of LLMs to progressively expand entities and prune irrelevant information, ultimately generating accurate answers. This approach dynamically addresses the issue of insufficient information through iterative refinement while establishing a dual-evidence verification mechanism via knowledge graph relation pruning and entity-guided context retrieval. 
Its graph-structured reasoning module transforms the connectivity validation of triple paths into a constraint satisfaction problem, effectively mitigating logical inconsistencies between text fragments and thereby significantly improving the quality of complex question answering.", + "bbox": [ + 81, + 121, + 482, + 378 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "ActiveRAG [100], on the other hand, employs a predefined three-stage process (Self-Inquiry $\\rightarrow$ Knowledge Assimilation $\\rightarrow$ Thought Accommodation) to structurally comprehend and calibrate retrieved knowledge, resolving conflicts between parametric memory and external knowledge. During the Knowledge Assimilation stage, ActiveRAG enhances the corrective effect of external knowledge on the internal representations of LLMs through multi-instruction fine-tuning strategies (e.g., counterfactual comparison and anchor association), substantially reducing the likelihood of hallucination generation. ARM's [7] structural alignment and self-verification stages also demonstrate optimization for post-retrieval reasoning. By incorporating domain knowledge via mixed-integer programming (MIP) solvers, ARM ensures the rationality and coverage of retrieval results, providing a scalable optimization framework for multi-source data compatibility and thereby enabling globally optimal cross-modal retrieval.", + "bbox": [ + 81, + 378, + 482, + 650 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.1.3 Hybrid Reasoning. The Hybrid pattern of pre-defined process forms a composite processing paradigm by integrating pre-retrieval reasoning with post-retrieval reasoning. The essence is formalized as a multi-round recursive iterative process, where each iteration cycle strictly comprises three phases: Retrieval, Generation, and Reasoning, executed as structured composite operations. 
Let the total number of iterations be $T$ ; the workflow is defined as:", + "bbox": [ + 81, + 657, + 495, + 779 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nQ _ {T} = \\left(\\bigcirc_ {t = 1} ^ {T} \\mathcal {R} _ {\\square} \\circ \\Gamma_ {t} \\circ \\Psi_ {t}\\right) \\left(Q _ {0}\\right) \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 789, + 482, + 814 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Here, each iterative unit is indexed by $t$ . The process terminates when a predefined condition $\\mathcal{T}(Q_t, D_t, C_t)$ is met, yielding the final response $\\Gamma_{\\mathrm{final}}(C_T)$ . This recursive mechanism enables dynamic synergy between knowledge acquisition and semantic inference, overcoming the linear limitations of single-cycle retrieval-generation frameworks.", + "bbox": [ + 81, + 816, + 483, + 906 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "IR-CoT [78] leverages chain-of-thought reasoning to iteratively construct intermediate logic chains, enabling multi-hop retrieval guided by progressively refined contextual cues. FinSearch [50] introduces a dual-phase architecture that first generates structured search graphs to model temporal and entity dependencies, followed by dynamic query rewriting to optimize financial data retrieval. LevelRAG employs hierarchical validation mechanisms, aggregating multi-granular retrieval results and triggering supplementary retrievals based on context completeness assessments. ITER-RETGEN [68] utilizes generation-enhanced feedback loops to iteratively refine query representations, enhancing semantic alignment between retrieval and generation phases.", + "bbox": [ + 511, + 90, + 913, + 287 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "These approaches share a common foundation in structured recursion while diverging in operational mechanisms. 
By enforcing deterministic iteration cycles, they balance controlled workflow execution with adaptive semantic exploration, addressing challenges such as multi-step reasoning, temporal coherence, and cross-domain knowledge synthesis. The hybrid paradigm's strength lies in its capacity to decompose complex queries into iterative retrieval-generation units, systematically bridging knowledge gaps while maintaining interpretability and robustness in open-domain problem-solving scenarios.", + "bbox": [ + 511, + 287, + 931, + 454 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.2 Dynamic RAG Workflow", + "text_level": 1, + "bbox": [ + 513, + 479, + 741, + 494 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The RAG with dynamic workflow represents an autonomous reasoning architecture centered around LLMs, characterized by the integration of non-deterministic operational workflows and real-time decision-making capabilities. Unlike predefined pipelines, this architecture enables continuous monitoring of reasoning states to dynamically trigger retrieval, generation, or verification operations. The LLM actively evaluates contextual demands during reasoning processes, autonomously determining optimal moments for invoking external tools or resources through a hybrid feedback coordination mechanism. 
By eliminating fixed iterative units and pre-determined tool-calling sequences, the framework achieves dynamic evolution of execution pathways, demonstrating superior adaptability in complex cognitive tasks through real-time adjustment of computational workflows based on intermediate reasoning outcomes.", + "bbox": [ + 511, + 500, + 913, + 739 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "This dynamic architecture manifests three principal characteristics: 1) Operator invocation is governed by the LLM's contextual state analysis, exemplified through special token prediction (e.g., '[Web-Search]' or `') to initiate external operations; 2) Reasoning trajectories exhibit high flexibility, allowing dynamic query reformulation and sub-problem generation to overcome limitations of static workflows; 3) Context-driven decision mechanisms prioritize real-time reasoning states over predefined rules, enhancing systemic responsiveness to emergent task complexities while improving precision.", + "bbox": [ + 511, + 739, + 913, + 906 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Synergizing RAG and Reasoning: A Systematic Review", + "bbox": [ + 84, + 59, + 380, + 71 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 651, + 59, + 911, + 71 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Defining the reasoning state at time $t$ as $S_{t} = (H_{t}, C_{t})$ , where $H_{t}$ denotes historical information aggregation and $C_{t}$ represents contextual embedding vectors, the decision process is modeled as a stochastic system:", + "bbox": [ + 81, + 90, + 482, + 152 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\na _ {t + 1} \\sim \\pi \\left(S _ {t}; \\Theta\\right) \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 228, + 170, + 482, + 186 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\nS _ {t + 1} = \\delta \\left(S _ {t}, 
\\mathcal {T} _ {a _ {t + 1}} \\left(S _ {t}\\right)\\right) \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 210, + 482, + 227 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Here, $\\pi : S \\to \\Delta(\\mathcal{A})$ constitutes the policy function mapping states to probability distributions over action space $\\mathcal{A}$ (retrieval, generation, verification, etc.), while $\\mathcal{T}_a$ denotes state transition functions corresponding to action $a$ . The non-Markovian nature of the system emerges from $S_{t+1}$ 's dependence on complete historical trajectories $\\{S_{\\leq t}\\}$ , with dynamic adaptability ensured through extensible action spaces $\\mathcal{A}$ and online optimization of policy parameters $\\Theta$ . This formulation enables context-sensitive state updates via $\\delta : S \\times \\mathcal{O} \\to S$ , establishing a theoretical foundation for open-ended reasoning processes in complex problem domains.", + "bbox": [ + 81, + 232, + 482, + 398 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Based on the mode of reasoning initiation, agentic RAG with dynamic workflows can be further categorized into three distinct types: Proactivity-driven, Reflection-driven, and Feedback-driven mechanisms. The LLM proactivity-driven approach is characterized by the model's autonomous triggering of actions based on internal assessments, executing operations without external intervention through mechanisms analogous to human intuitive decision-making—for instance, when the model independently identifies insufficient evidentiary support in the current reasoning process, it proactively generates retrieval requests to supplement information. 
The reflection-driven mode emphasizes self-examination of the reasoning process, dynamically initiating subsequent operations through quantitative evaluation of intermediate result quality (e.g., triggering actions when the calculated reasoning support score of 0.7 exceeds a predefined threshold of 0.6), which simulates the self-optimization logic of expert systems, enabling the model to adjust reasoning pathways through introspection. The feedback-driven mechanism incorporates external intervention, employing independent models or rule-based systems to perform real-time scoring of intermediate states (e.g., an external reward model assigning a 2.5/5 score to reasoning steps) while providing corrective suggestions, operating similarly to a mentor-guided mode that continuously calibrates the reasoning workflow through external feedback signals.", + "bbox": [ + 81, + 398, + 482, + 791 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.2.1 Proactivity-Driven Reasoning. The core innovation of Proactivity-driven Reasoning lies in enabling LLMs to fully govern the reasoning process through self-triggered prediction mechanisms. This active control manifests through three key mechanisms: (1) direct tool invocation via model-generated special tokens (e.g., [Web-Search]), without external intervention, (2) context-aware decision making based", + "bbox": [ + 81, + 800, + 482, + 906 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "on real-time knowledge gaps or hypothesis verification requirements, and (3) Markov Decision Process (MDP)-based dynamic path optimization.", + "bbox": [ + 511, + 90, + 913, + 136 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Formally, the reasoning process can be modeled as a state sequence $S = \\{s_0, s_1, \\ldots, s_t\\}$ , where each state $s_t$ encapsulates the current reasoning context. 
At each step $t$ , the LLM selects an action $a_t \\in \\{\\text{retrieve, generate, terminate}\\}$ based on $s_t$ , executes the corresponding operation (e.g., document retrieval or answer generation), and updates its state through transition function $s_{t+1} = \\delta(s_t, a_t, o_t)$ where $o_t$ represents action outcomes. This MDP framework enables dynamic path adjustment through real-time feedback until termination ( $a_T = \\text{terminate}$ ) and final answer generation.", + "bbox": [ + 511, + 136, + 913, + 287 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Recent advancements demonstrate significant improvements over conventional RAG approaches. The Agentic Reasoning framework achieves granular control through dynamic tool invocation, eliminating predefined execution sequences. DeepRAG [24] optimizes cost-accuracy tradeoffs via MDP-based imitation learning, addressing the retrieval-generation disconnection in traditional systems. CoRAG [83] introduces hybrid-driven mechanisms combining LLM-initiated subqueries with external policy control, enhancing error tolerance for complex queries. Collectively, these approaches establish a paradigm shift from fixed pipelines to context-sensitive, self-optimizing reasoning architectures.", + "bbox": [ + 511, + 287, + 929, + 469 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.2.2 Reflection-Driven Reasoning. The reflection-driven mechanism represents a dynamic reasoning framework that enables iterative self-evaluation and revision of intermediate outputs through model introspection. Common methods include: (1) a evaluation system combining explicit token prediction and implicit confidence scoring, (2) self-monitoring capabilities through grounding tokens for content-document consistency verification and utility tokens for answer effectiveness assessment, and (3) adaptive routing mechanisms that automatically select single-hop or multi-hop reasoning paths based on contextual complexity. 
The mathematical formalism of this process can be expressed as:", + "bbox": [ + 511, + 476, + 921, + 657 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {P} = \\bigcup_ {t = 1} ^ {T} \\left[ G \\left(\\mathbf {C} _ {t}\\right)\\rightarrow E \\left(\\mathbf {H} _ {t}, \\mathcal {D}\\right)\\rightarrow \\psi \\left(\\phi \\left(\\mathbf {e} _ {t}\\right), \\tau\\right)\\right] \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 562, + 667, + 913, + 709 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "where $G$ denotes the generation function operating on current context $\\mathbf{c}_t$ , $E$ represents the evaluation function that assesses hidden states $\\mathbf{h}_t$ against external knowledge base $\\mathcal{D}$ , $\\phi$ serves as the confidence mapping function, $\\tau$ is the decision threshold, and $\\psi$ functions as the branch selector.", + "bbox": [ + 511, + 710, + 913, + 785 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In practical implementations like Self-RAG [3], this framework generates candidate responses alongside reflection tokens, computes passage relevance scores (ISREL $\\in$ [0,1]) and factual support metrics (ISSUP), and employs weighted aggregation of token probabilities in $\\phi$ to determine retrieval activation or generation revision through threshold-based $\\delta$ operations. 
Meanwhile, Open-RAG [38] incorporates hybrid threshold mechanisms and Mixture-of-Experts architecture", + "bbox": [ + 511, + 785, + 913, + 906 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 59, + 346, + 71 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Gao et al.", + "bbox": [ + 857, + 59, + 911, + 71 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "to enforce counterfactual verification through non-retrieval confidence scoring $(\\mathrm{Pr}_{\\mathrm{NoRT}})$ , enabling dynamic expansion of complex reasoning capabilities while preserving base model efficiency. ReaRAG [49] utilizes knowledge-guided reasoning chains combined with external knowledge sources to perform reflection-driven reasoning. In each iteration, it adjusts the reasoning path through the \"Thought-Action-Observation\" paradigm, effectively preventing error propagation and improving answer accuracy.", + "bbox": [ + 81, + 90, + 482, + 226 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The paradigm's innovation lies in reconstructing traditional sequential processes into conditional Markov decision processes, where state transition probabilities $P(s_{t + 1}|s_t)$ are dynamically determined by model self-evaluation outcomes. Compared to proactive LLM-driven methods (e.g., Toolformer's direct API invocation), the reflection-driven approach establishes closed-loop control through explicit evaluation stages (function $E$ ), effectively mitigating hallucination risks while maintaining computational efficiency.", + "bbox": [ + 81, + 227, + 482, + 363 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4.2.3 Feedback-Driven Reasoning. The feedback-driven dynamic RAG system establishes closed-loop control over reasoning processes through external signals, formally modeled as a Partially Observable Markov Decision Process. 
The system state $s_t = (q_t, \mathcal{K}_t, \mathcal{H}_t)$ evolves through iterative interactions, comprising the current query representation $q_t$, dynamic knowledge base $\mathcal{K}_t$, and historical trajectory $\mathcal{H}_t$. Initialized with $q_0$ and $\mathcal{K}_0 = \emptyset$, the policy function $\pi(a_t | s_t)$ generates actions from the operational space $\mathcal{A} = \{\text{Retrieve}, \text{Reason}, \text{Verify}, \text{Answer}, \emptyset\}$.
The reward function combines immediate and terminal rewards:", + "bbox": [ + 83, + 664, + 486, + 722 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nr _ {t} = \\lambda_ {1} \\pi_ {\\text {r e w a r d}} \\left(s _ {t}\\right) + \\lambda_ {2} \\mathbb {E} _ {s _ {t + k}} \\left[ \\gamma^ {k} R _ {\\text {t e r m i n a l}} \\right] \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 151, + 727, + 480, + 746 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "with discount factor $\\gamma$ . SmartRAG extends this through policy gradient optimization", + "bbox": [ + 81, + 750, + 482, + 780 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla_ {\\theta} J (\\theta) = \\mathbb {E} _ {\\tau \\sim \\pi_ {\\theta}} [ \\sum_ {t = 0} ^ {T} \\nabla_ {\\theta} \\log \\pi_ {\\theta} (a _ {t} | s _ {t}) \\hat {A} _ {t} ] \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 148, + 784, + 480, + 824 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "where the advantage function $\\hat{A}_t$ integrates temporal feedback.", + "bbox": [ + 81, + 830, + 482, + 858 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Implicit environmental feedback derives from knowledge base validation, as implemented in KBQA-o1's SPARQL verification and SolutionRAG's pruning mechanisms [58].", + "bbox": [ + 81, + 861, + 483, + 906 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "This feedback is formalized as $r_t = \\mathbb{I}(\\mathcal{K}_t\\models q_0)\\cdot c_{\\mathrm{valid}} - \\mathbb{I}(\\bot \\in \\mathcal{K}_t)\\cdot c_{\\mathrm{invalid}}$ with validation function $\\mathbb{I}(\\cdot)$ and penalty coefficients $c$ . 
ReARTeR [75] introduces threshold-triggered correction: when $r_t < \\tau$ , it activates refinement loops $\\mathcal{K}_{t + 1} = \\mathrm{PEM}(\\mathcal{K}_t,q_0)\\oplus \\mathrm{Retrieve}(\\mathrm{PRM}(s_t))$ .", + "bbox": [ + 513, + 90, + 913, + 167 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Structured rule feedback encodes domain knowledge through differentiable scoring functions. MCTS-KBQA [97] implements depth-attenuated rewards", + "bbox": [ + 513, + 167, + 911, + 212 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nr _ {t} = \\frac {1}{1 + \\alpha d _ {t}} \\sum_ {i = 1} ^ {n} \\mathrm {L L M} _ {\\text {s c o r e}} \\left(a _ {t} ^ {(i)}\\right) \\tag {12}\n$$\n", + "text_format": "latex", + "bbox": [ + 607, + 224, + 911, + 263 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "with search depth $d_t$ and decay coefficient $\\alpha$ . CR-Planner's hierarchical critique combines subgoal and execution scores: $r_t^{\\mathrm{total}} = \\beta_1\\pi_{\\mathrm{sub}}(s_t) + \\beta_2\\pi_{\\mathrm{exec}}(a_t|s_t)$ through weighted fusion.", + "bbox": [ + 513, + 276, + 913, + 321 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "These feedback mechanisms interact through a unified strategy update framework, where external feedback-driven approaches achieve controllable optimization of the reasoning process through interpretable feedback signals while maintaining the generative capabilities of LLMs. 
Overall, the dynamic process of RAG, by endowing the model with autonomy in the reasoning process, not only enhances adaptability to complex tasks but also provides a new solution for efficient reasoning in resource-constrained environments.", + "bbox": [ + 511, + 321, + 913, + 457 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "5 Implementation and Optimization", + "text_level": 1, + "bbox": [ + 514, + 474, + 854, + 491 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Building upon preceding sections, this section systematically analyzes the concrete implementation and optimization strategies for reasoning within the RAG paradigm. In contrast to existing surveys that predominantly focus on posttraining methodologies or isolated LLM reasoning mechanisms, our analysis maintains a dedicated focus on the synergistic integration of RAG with reasoning examining their co-adaptive implementations through a structural lens.", + "bbox": [ + 511, + 494, + 913, + 614 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "5.1 Reasoning Process", + "text_level": 1, + "bbox": [ + 514, + 631, + 692, + 645 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "5.1.1 LLM CoT. Integrating Chain-of-Thought (CoT) reasoning with LLMs is key to combining RAG with complex reasoning tasks. Research shows CoT enhances RAG systems by explicitly guiding multi-step reasoning and dynamically incorporating external knowledge. For example, ActiveRAG [100] uses a \"Self-Inquiry $\\rightarrow$ Knowledge Assimilation $\\rightarrow$ Thought Accommodation\" chain to align knowledge and reasoning: a knowledge assimilation agent merges external documents with LLM memory via operations like association and reflection, creating structured knowledge. Meanwhile, a reasoning adaptation agent refines inference chains from Self-Inquiry to ensure answers align with retrieved knowledge and address reasoning gaps. 
Similarly, Adaptive-RAG [41] alternates between CoT and retrieval, breaking down multi-hop reasoning into steps such as entity localization and document correlation, refining retrieval and generation based on prior results.", + "bbox": [ + 511, + 648, + 913, + 906 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Synergizing RAG and Reasoning: A Systematic Review", + "bbox": [ + 84, + 59, + 380, + 71 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 651, + 59, + 911, + 71 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/a797186982b7420dcac71a470f6aca1de11923d2ffcfd02c0fb32375430a9b11.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 84, + 87, + 911, + 265 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/cc011ed02bfe16c008fa59b27f259cd658bc4f9700c3e26493d504dd622d891f.jpg", + "image_caption": [ + "Figure 6. Implementation and optimization of the synergy between RAG and Reasoning" + ], + "image_footnote": [], + "bbox": [ + 84, + 268, + 911, + 415 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "At the knowledge and reasoning level, O1-Embedder [101] drives RAG through open-ended long-text reasoning, extending CoT beyond fixed triggers via coherent thought processes like problem decomposition. PlanRAG [48] explicitly uses CoT to produce executable multi-step plans, adjusting operations dynamically through a closed-loop \"plan-execute-feedback\" cycle. Despite different implementations, these methods share two CoT strengths: breaking down complex problems into clear intermediate steps and guiding external knowledge selection through reasoning states. 
Studies show these approaches outperform traditional RAG in multi-hop QA and knowledge-intensive tasks by enhancing both LLMs' reasoning and adaptability to external knowledge.", + "bbox": [ + 81, + 465, + 483, + 662 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "5.1.2 Special Token Prediction. Recent advances active RAG also highlight special token prediction as a key method for dynamically linking external knowledge retrieval with multi-step reasoning [16]. By embedding domain- or action-specific tokens (e.g., '[Web-search]', '[Retrieve=Yes)', `') into LLM vocabularies, models can autonomously trigger tools or self-reflect during text generation. Frameworks like Self-RAG [3] and SmartRAG [20] use dedicated tokens ('Retrieve', 'ISREL', '[RETRIEVE]') to manage retrieval activation, relevance checks, and output verification, turning static reasoning chains into conditional workflows. The innovation lies in predicting these tokens within generated sequences, segmenting tasks into retrieval initiation, document evaluation, and knowledge grounding phases.", + "bbox": [ + 81, + 695, + 514, + 906 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Hybrid models such as Open-RAG [38] combine token control with mixture-of-experts (MoE) routing, sparsely activating experts aligned with token-predicted reasoning. Unlike traditional chain-of-thought or search tree methods, special token prediction offers finer control and interpretability by encoding decision logic explicitly in token sequences while maintaining end-to-end training. This approach also overcomes latency and inflexibility of preset retrieval schedules by enabling context-aware, on-demand tool use. 
For example, R1-Searcher [72] and Search-o1 [51] use dedicated boundary tokens (e.g., begin/end search markers) to coordinate retrieval pauses and resume generation after knowledge integration.
DeepRAG [24] applies a binary tree search within a Markov decision process to explore parametric knowledge and retrieval paths in parallel, selecting optimal branches. DeepSolution's [54] bidirectional thinking tree alternates expanding solution and critique nodes with scoring for path pruning, aligning naturally with MCTS evaluation. These methods balance exploration efficiency with solution coverage through explicit tree structures.", + "bbox": [ + 86, + 90, + 480, + 286 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "MCTS enhances robustness by optimizing long-term decisions via simulation, evaluation, and backpropagation. CR-Planner [52] integrates MCTS with the UCB strategy to balance exploration and exploitation while estimating optimal subgoals through multi-step simulations. KBQA-O1 [58] and MCTS-KBQA [97] generate candidate actions using policy models and combine reward models to globally assess logical forms, reducing local optima. ReARTeR [75] innovatively merges MCTS with procedural reward models (PRMs), interleaving retrieval and reasoning steps, and filtering high-reward paths to form a closed-loop \"reason-retrieve-reason\" cycle. These methods probabilistically explore paths and use reinforcement learning feedback to improve global reasoning for complex tasks.", + "bbox": [ + 86, + 287, + 480, + 498 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Reinforcement learning-optimized policy networks adaptively refine search strategies. 
LeReT [34] replaces fixed search algorithms with reinforcement learning (e.g., IPO) to dynamically optimize query generation based on rewards like retrieval accuracy, implicitly learning optimal search patterns without explicit tree or graph structures, thus offering greater flexibility and scalability.", + "bbox": [ + 86, + 500, + 480, + 604 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "In summary, search-driven reasoning unites inference and retrieval through structured strategies, combining multi-path exploration, dynamic evaluation, and adaptive optimization to deliver interpretable, efficient solutions for knowledge-intensive tasks. Future work may focus on hybrid paradigms (e.g., integrating MCTS and reinforcement learning) and lightweight algorithms to balance performance with computational efficiency.", + "bbox": [ + 86, + 604, + 480, + 724 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "5.1.4 Reasoning on Graph. Graph-structured reasoning offers a novel approach for multi-hop inference in RAG systems by explicitly modeling knowledge interaction paths through topology. Current methods fall into two categories: query-flow-oriented search graphs (e.g. FinSearch [50]) and knowledge-association-based expansion graphs (ToG-2.0 [60]) FinSearch builds a directed acyclic graph (DAG) where nodes are atomic subqueries (e.g., stock prices, financial reports) and edges capture logical and temporal dependencies. A pre-planner breaks down queries into subquery sequences,", + "bbox": [ + 86, + 756, + 486, + 906 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "using graph traversal to control information flow and dynamically adjust paths, such as backtracking when conflicts arise—substantially surpassing linear chain-of-thought methods in handling complex logic.", + "bbox": [ + 517, + 90, + 911, + 150 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "5.1.5 External Solver. 
The integration of RAG and reasoning can also be achieved by incorporating external solvers, where specialized solvers, such as the Alignment-Oriented LLM-based Retrieval Method (ARM), are employed to handle the reasoning component.
By tuning the width and depth parameters, the method emulates human reasoning: broadly exploring potential associations before deeply verifying high-confidence paths. FRAG [23] dynamically adjusts retrieval strategies by predicting the hop range of reasoning paths based solely on the query text, thereby enhancing retrieval quality without requiring additional fine-tuning or invocation of large language models, enabling flexible and efficient retrieval optimization. FG-RAG [32] further expands entity coverage in graph retrieval through context-aware entity expansion, providing richer background information. Combined with query-level fine-grained summary generation, FG-RAG transforms coarse-grained graph information into highly relevant detailed content, effectively improving the performance of query-focused summarization tasks.", + "bbox": [ + 517, + 529, + 911, + 844 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Although differing in design from workflow-based methods, ToG-2.0 shares key advantages with other graph-structured approaches: explicitly modeling reasoning state dependencies, supporting dynamic path generation and optimization,", + "bbox": [ + 517, + 845, + 926, + 906 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Synergizing RAG and Reasoning: A Systematic Review", + "bbox": [ + 84, + 60, + 380, + 71 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 653, + 60, + 911, + 71 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "and enabling closed-loop interaction between retrieval and reasoning. 
This effectively overcomes the limitations of traditional RAG in implicit relation inference and counterfactual analysis, thereby establishing an interpretable theoretical and practical framework for knowledge reasoning.", + "bbox": [ + 81, + 90, + 480, + 167 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "5.2 Reasoning Optimization", + "text_level": 1, + "bbox": [ + 83, + 181, + 305, + 196 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In the previous chapter, we focused on introducing several approaches to integrate reasoning with RAG. This chapter shifts attention to how to augment the reasoning capabilities, specifically including Prompt-Based, Tuning-Based, and RL-Based strategies.", + "bbox": [ + 81, + 199, + 480, + 275 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "5.2.1 Prompt-Based. Prompt-Based optimization is a key approach to improving RAG and reasoning system performance by using carefully designed natural language prompts. These prompts break down complex reasoning tasks into manageable steps and guide LLMs to follow specific logical structures during generation. The main advantage is that control over reasoning flow is achieved solely through prompt design, without parameter fine-tuning or reinforcement learning, preserving the model's generalization while enhancing task-specific results.", + "bbox": [ + 81, + 287, + 482, + 436 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "This approach has three main features. First, task structuring: prompts explicitly decompose and control reasoning chains via zero-shot or templated designs. 
Techniques like Co-STORM [43] and WriteHere [98] use role assignments, stage divisions, and operation-specific instructions to guide multi-step reasoning—such as proposal generation, knowledge retrieval, refinement, and validation—improving interpretability by representing intermediate steps clearly.", + "bbox": [ + 81, + 438, + 482, + 559 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Second, result reliability is improved by standardizing outputs and reducing hallucinations. Strategies include requiring citation of retrieval results, enforcing specific output formats, and integrating reflection and calibration based on retrieved knowledge. Systems like FinSearch [50] and ActiveRAG [100] incorporate temporal weighting, deduplication, and domain rules through prompts, enhancing consistency and logical coherence, especially in complex domains.", + "bbox": [ + 81, + 559, + 482, + 679 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Third, interactive adaptability allows dynamic prompt adjustments. Special tokens (e.g., , [Web-search]) enable models to trigger tools or revise queries in real time based on intermediate results. Methods such as Agentic Reasoning [92] and PlanRAG [48] use context-sensitive prompts and feedback loops to refine reasoning paths dynamically, maintaining coherence and accuracy in multi-hop tasks and outperforming traditional RAG methods in complex, evolving scenarios.", + "bbox": [ + 81, + 680, + 482, + 815 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In summary, prompt-based optimization offers an efficient, flexible, and reliable approach to enhancing RAG+Reasoning by emphasizing task structuring, result standardization, and interactive adaptability. 
Its non-intrusive and broadly applicable design has established it as a mainstream strategy for optimizing LLM reasoning and serves as a foundation", + "bbox": [ + 81, + 816, + 482, + 906 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "for future hybrid methods integrating fine-tuning and reinforcement learning. By systematically optimizing reasoning without altering model parameters through semantic structures, dynamic feedback, and symbolic constraints, this paradigm effectively manages macro-level controls like task decomposition and knowledge integration while addressing key challenges such as generation consistency, logical coherence, and external knowledge alignment. This makes prompt-based optimization a lightweight yet powerful solution for complex reasoning tasks.", + "bbox": [ + 511, + 90, + 913, + 242 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "5.2.2 Tuning-Based. The tuning-based approach improves the integration of RAG and reasoning by optimizing model parameters to internalize the retrieval-augmented chain-of-thought mechanism within LLMs. Current research mainly targets three goals: retrieval pathway optimization, structured generation enhancement, and collaborative training with external modules.", + "bbox": [ + 511, + 257, + 913, + 359 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "For retrieval pathway optimization, methods like CoRAG [83] and DeepRAG [24] build end-to-end multistep reasoning frameworks through full parameter fine-tuning and multitask learning. CoRAG expands single-step QA datasets into retrieval-reasoning chains and jointly trains tasks such as sub-query generation, intermediate answer prediction, and final composition. This boosts the model's ability to break down complex problems (e.g., multi-entity relational reasoning) and adapt retrieval strategies dynamically (e.g., query rewriting, error correction). 
DeepRAG combines imitation and contrastive learning with binary tree search to create efficient retrieval paths, using a DPO-style contrastive loss to reduce redundant retrieval while maintaining accuracy.", + "bbox": [ + 511, + 362, + 928, + 558 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "To improve structured generation, MCTS-KBQA [97] and Self-RAG [3] fine-tune models for precise special token generation. MCTS-KBQA uses supervised fine-tuning to make large language models output instructions that comply with knowledge graph protocols (e.g., SPARQL), modeling reasoning as executable tool-call sequences. Self-RAG enhances self-supervised generation control by expanding vocabulary and training the model to generate reflection tokens like retrieval triggers and relevance markers, preserving fluency and reducing factual errors. Additionally, O1-Embedder [101] and Open-RAG [38] align semantic spaces via mixed fine-tuning: O1-Embedder combines generative and contrastive training with special tokens to separate generation from embedding tasks, enhancing multihop semantic understanding; Open-RAG uses QLoRA [17] quantized fine-tuning and Mixture of Experts (MoE) modules to specialize networks for single/multi-hop reasoning.", + "bbox": [ + 511, + 559, + 913, + 815 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In collaborative optimization with external modules, AdaptiveRAG [41] and CR-Planner [52] apply parameter isolation to balance generality and adaptability. AdaptiveRAG finetunes a lightweight classifier to select retrieval strategies dynamically. 
CR-Planner introduces a Critic model trained with contrastive loss on MCTS trajectory data to assess the", + "bbox": [ + 511, + 816, + 913, + 906 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 59, + 346, + 71 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Gao et al.", + "bbox": [ + 857, + 59, + 911, + 70 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/7887c1ca647a9f89e6f4474b770fc82df542d6a89ee571cf004fffbd171081d9.jpg", + "table_caption": [ + "Table 1. Comparison of RL-based RAG with Reasoning Methods" + ], + "table_footnote": [ + "1ORM: Outcome-based Reward Model; PRM: Process-based Reward Model. 2Full: Full parameter tuning." + ], + "table_body": "
MethodBase ModelRLParameterSupervisionReward FunctionPolicy Strategy
PORAG [73]Qwen2.5/Llama3.2GRPOQLRAORMDual rewards: \n1. Retrieval fidelity (Rfid) \n2. Response quality (Rqual) \nCombined: R = αRfid + βRqual• Group-based advantage normalization \n• PPO-style clipped objective \n• KL regularization
DeepResearcher [106]Qwen2.5-7BGRPOFullORMFormat compliance penalty (-1) + Answer F1 score• Reference policy constraints \n• KL divergence penalty
ReSearch [6]Qwen2.5-7BGRPOFullORMHybrid rewards: \n• Answer F1 (vs ground truth) \n• Format compliance check• GRPO with clip ratio 0.2 \n• Group advantage normalization (G=5) \n• β = 0.001 KL penalty
ReZero [16]Llama3.2-3BGRPOFullORM+PRM• Answer correctness \n• Format compliance \n• Search diversity \n• Chunk matching \n• Retry behavior \n• Strategy compliance• Intra-group reward comparison \n• Noise-injected robustness training \n• KL constraints
MMOA-RAG [12]Llama-3-8BMAPPOFullORMShared F1 reward + penalties: \n• Excessive sub-questions \n• Document ID errors \n• Answer hesitability• MAPPO actor-critic updates \n• Cosine learning rate scheduling
DeepNote [84]Qwen2.5/Llama3.1DPOFullORMImplicit preference modeling via likelihood contrast• Direct Preference Optimization \n• Preference gap maximization
R1-Searcher [72]Qwen2.5/Llama3.1Reinforce++FullORMTwo-stage rewards: \n1. Retrieval count + format \n2. F1 score + format penalty• RAG-based rollout \n• Retrieval-masked loss
KBQA-O1 [58]Llama3/Qwen2.5/Gemma2MCTSDoRAORM+PRMComposite reward: \n• Stepwise policy model score \n• Final reward model score• MCTS trajectory optimization \n• Q-value backpropagation
DeepRetrieval [42]Qwen2.5-3BPPOFullORMTask metrics: \n• Recall@k/NDCG \n• Syntax validity• GAE advantage estimation \n• Distributed HybridFlow
LeReT [34]Llama3-8B/Gemma-9BIPOFullPRMAverage Precision (AP) of retrieved documents• Identity Policy Optimization \n• Context distillation
SmartRAG [20]Flan-T5-L/Llama2-7BPPOFull/LoRAORMAction-specific: \n• EM+F1 for answers \n• Cost penalty for retrievals• On-policy sampling \n• PPO updates
ReARTeR [75]LLaMA3.1-8BMCTSLoRAORM+PRMMonte Carlo step scoring + TD look-ahead• Iterative preference optimization \n• KTO loss
DeepRAG [24]Qwen2.5-7B/Llama3.1-8BHybridFullORM+PRMCost-aware accuracy: \nR = -C(o) × T(st) \nC(o): Answer correctness \nT(st): Total retrieval cost• Imitation + contrastive learning \n• PPO-like calibration
RAG-Gym [96]LLaMA3.1-8BHybridLoRAPRMTriple criteria: \n• Sufficiency \n• Utility \n• Redundancy• SFT + DPO \n• PRM-guided selection
CR-Planner [52]Skywork-Llama3.1-8BMCTSLoRAPRMCritic-estimated rewards: \n• Stepwise correctness \n• Global impact• MCTS simulation \n• Pairwise ranking loss
", + "bbox": [ + 84, + 117, + 915, + 719 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "long-term value of reasoning actions, prioritizing efficient solutions in tasks like mathematical reasoning.", + "bbox": [ + 81, + 760, + 480, + 789 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Together, these tuning strategies restructure the parameter space to internalize retrieval-reasoning interactions effectively, enhancing the model's ability to solve complex problems while ensuring computational efficiency and broad applicability across domains.", + "bbox": [ + 81, + 790, + 482, + 864 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "5.2.3 RL-Based. As shown in Table 1, Reinforcement learning (RL) has recently become pivotal for tackling long-chain", + "bbox": [ + 83, + 876, + 485, + 906 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "reasoning in modern inference models and optimizing RAG combined with reasoning tasks. Central to these advances is the use of dynamic reward mechanisms that guide LLMs to balance knowledge retrieval and logical reasoning adaptively. RL optimization objectives generally fall into two categories: outcome-based reward modeling (ORM) and process-based reward modeling (PRM), with some hybrid approaches blending both to balance global goals and local optimizations.", + "bbox": [ + 511, + 760, + 915, + 881 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Synergizing RAG and Reasoning: A Systematic Review", + "bbox": [ + 84, + 59, + 380, + 71 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 651, + 59, + 913, + 71 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The ORM paradigm focuses solely on the quality of the final output and its adherence to standards. 
For example, R1-Searcher [72] employs a two-stage Reinforce++ [35] training where rewards in the first stage depend on correct retrieval calls and special token generation, while the second stage directly optimizes the F1 score of answers. This encourages the model to develop strategies maximizing knowledge integration, reducing hallucinations, and enhancing accuracy in multi-hop QA beyond traditional RAG methods. Similarly, KBQA-O1 [58] uses MCTS with a policy network for candidate reasoning paths and a reward model evaluating logical consistency, effectively balancing exploration and exploitation in knowledge base QA.", + "bbox": [ + 81, + 90, + 480, + 287 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Conversely, PRM emphasizes detailed supervision of intermediate reasoning steps. LeReT [34] uses the Identity Policy Optimization (IPO) algorithm, optimizing query quality by rewarding average precision (AP) of retrieved documents, boosting retrieval recall and overall multi-hop task performance. ReARTeR [75] extends this with a step-level binary reward model, combining Monte Carlo scoring and temporal difference (TD) methods to evaluate reasoning paths proactively, reducing logical errors and redundant retrievals, and improving accuracy on benchmarks like HotpotQA.", + "bbox": [ + 81, + 287, + 482, + 438 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Moreover, influenced by DeepSeek-R1, GRPO [69] is also gradually being applied in scenarios combining RAG and Reasoning. GRPO is a variant of the Proximal Policy Optimization (PPO) reinforcement learning algorithm that abandons the critic model and instead estimates the baseline from group scores, significantly reducing training resources. For example, ReZero [16] uses GRPO to introduce a \"retry\" mechanism for LLMs, incentivizing LLMs to keep trying after an initial search failure by rewarding retry search queries. 
This mechanism simulates the human strategy of \"if at first you don't succeed, try again\" in information retrieval. PORAG [73], based on GRPO, directly optimizes retrieval quality, contextual relevance, and generation coherence through a dual reward mechanism (retrieval fidelity and response quality).", + "bbox": [ + 81, + 438, + 486, + 648 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Hybrid methods merge ORM and PRM to optimize both final outcomes and intermediate steps via composite rewards. SmartRAG [20] applies Proximal Policy Optimization (PPO), combining answer-level F1 rewards with penalties for excessive retrievals, balancing knowledge completeness and efficiency. RAG-Gym [96] advances this with multidimensional process rewards (sufficiency, utility, redundancy) and techniques like contrastive loss and Best-of-N sampling to promote efficient search decisions, even zero-shot. These hybrid strategies markedly lower retrieval costs while sustaining accuracy in complex tasks.", + "bbox": [ + 81, + 648, + 482, + 815 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In addition, we can also observe that in current RL-based methods, academia focuses more on exploration with small-scale LLMs (<8B), among which the Qwen and Llama series are the most widely used. Overall, RL provides a flexible, scalable framework for integrating RAG and reasoning. ORM", + "bbox": [ + 81, + 816, + 482, + 892 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "guides the discovery of globally optimal strategies, PRM enhances reasoning robustness via local refinements, and their combination addresses modular system limits. 
Future work may explore collaborative rewards in multi-agent settings, offline RL based on world models, and hierarchical reward decomposition for open-domain applications.", + "bbox": [ + 511, + 90, + 913, + 181 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "6 Downstream Tasks and Evaluation", + "text_level": 1, + "bbox": [ + 513, + 195, + 856, + 210 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "While previous chapters focused on methodologies and advances in RAG combined with reasoning, this chapter shifts to tasks and evaluation. It provides a comprehensive overview and analysis of existing tasks, datasets, their current status, and emerging trends. By reviewing these resources, we highlight the landscape's gaps and limitations in current evaluation methods. The chapter also explores key challenges in assessment frameworks, identifying shortcomings and suggesting potential improvements.", + "bbox": [ + 511, + 215, + 913, + 351 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/1e880ccde31a88477a8599518908e126cc979da6226fba098d13340d6687a5c6.jpg", + "image_caption": [ + "Figure 7. The current downstream tasks and datasets related to the combination of RAG and Reasoning show that multi-hop question answering tasks still dominate. Correspondingly, HotpotQA, 2WikiMultihopQA, and MuSiQue remain the most commonly used evaluation datasets." + ], + "image_footnote": [], + "bbox": [ + 517, + 367, + 911, + 628 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "6.1 Knowledge-Intensive Tasks", + "text_level": 1, + "bbox": [ + 513, + 752, + 759, + 766 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "In the evaluation for RAG systems, knowledge-intensive question answering (QA) remains the primary focus (Figure 7). As LLMs improve in semantic understanding and reasoning, benchmarks have expanded to cover tasks from simple fact retrieval to complex multi-step reasoning. 
However, evaluation methods specifically designed for RAG lag behind due to the dual challenge of assessing both retrieval-generation coherence and adaptability to dynamic knowledge bases. For example, multi-hop QA requires integrating", + "bbox": [ + 511, + 770, + 913, + 906 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 59, + 346, + 71 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Gao et al.", + "bbox": [ + 857, + 59, + 911, + 70 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "dispersed knowledge through multi-stage retrieval while verifying logical consistency between answers and retrieval paths. This complexity increases dataset construction costs compared to purely generative tasks, keeping research centered on knowledge-intensive QA subcategories such as open-domain QA, knowledge-base QA, and multi-hop QA.", + "bbox": [ + 81, + 90, + 480, + 181 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Commonly used datasets include Natural Questions (NQ) [47] for single-hop factual queries, HotpotQA, 2WikiMultiHopQA [31] and Musique [79] for multi-hop QA. These benchmarks are mostly based on Wikipedia and fail to reflect the RAG demands and corresponding complexity in real-world scenarios. Some efforts have pushed evaluation boundaries, like CRUD-RAG's [59] operational metrics and DomainRAG's [86] domain-specific evaluations, but high costs and metric-task interdependencies limit progress. As a result, knowledge-intensive QA remains central for testing RAG robustness and practicality, highlighting a critical bottleneck: the need for innovative frameworks that balance retrieval flexibility and controlled generation to support new developments like Agentic RAG. Overall, many evaluation benchmarks are lagging behind rapid RAG+Reasoning advances, especially as LLMs grow more powerful. 
Specifically, the current evaluation of RAG faces the following challenges.", + "bbox": [ + 81, + 181, + 506, + 439 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Limited Challenge. With improving LLM capabilities, many knowledge-based questions are no longer difficult, as they can be answered without external retrieval. Current multi-hop reasoning datasets, often built from artificial templates, offer limited challenge. There is an urgent need for more complex datasets reflecting real-world scenarios and practical use.", + "bbox": [ + 81, + 449, + 480, + 555 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Lack of Specificity. Existing evaluation tasks are still predominantly focused on factual assessment and knowledge retrieval, lacking evaluations that probe deeper analytical thinking. This constraint limits the ability to measure a model's capacity for profound reasoning and cognitive depth.", + "bbox": [ + 81, + 566, + 480, + 657 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Task Uniformity. The majority of benchmarks are overly dependent on QA tasks, focusing on reactive, question-and-answer-based interactions. There is a pressing need to introduce tasks aligned with real-world applications, such as active information retrieval tasks based on personal knowledge or proactive knowledge discovery.", + "bbox": [ + 81, + 667, + 480, + 758 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Insufficient Dimensions. Evaluations are primarily end-to-end, focusing solely on final outcomes. However, with the introduction of reasoning processes, RAG+Reasoning systems have become iterative, multi-step frameworks. Current evaluations are unable to assess intermediate reasoning steps or retrieval chains effectively. The absence of step-by-step supervision data limits both research and training of related methods. 
Furthermore, current evaluation methodologies lack comprehensive assessments of system performance", + "bbox": [ + 81, + 770, + 480, + 906 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "trade-offs, such as computational cost and efficiency, which are critical for practical deployment.", + "bbox": [ + 513, + 90, + 911, + 121 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "This emergent landscape necessitates the creation of a new generation of evaluation frameworks that can address these shortcomings. Such frameworks must not only ensure the adaptability of retrieval and the controllability of generation but also integrate intermediate reasoning evaluation and efficiency metrics, paving the way for the development of more robust and efficient RAG systems suited to diverse real-world applications.", + "bbox": [ + 511, + 121, + 913, + 242 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "6.2 New Tasks on RAG+Reasoning", + "text_level": 1, + "bbox": [ + 513, + 257, + 784, + 272 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Recently, combining RAG with reasoning has significantly improved models' ability to tackle more realistic and challenging tasks, raising the standards for evaluation methods. This subsection examines emerging tasks that assess their combined strengths, related tasks and datasets are shown in Table 2. Here, \"emerging\" refers not to entirely new tasks but to those with unprecedented complexity and demands. These include Deep Research tasks requiring multi-layered information integration and reasoning; PhD (Expert)-Level Complex Reasoning tasks targeting advanced scenario reasoning; and critical; domain-specific decision support tasks like medical diagnosis and legal analysis. 
Such tasks demand not only external knowledge retrieval but also logical consistency, coherence, and depth in reasoning.", + "bbox": [ + 511, + 276, + 913, + 487 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "6.2.1 Deep Research. From the perspective of integrating RAG and reasoning, Deep Research tasks exemplify complex downstream applications. They require models to handle open-ended retrieval, produce long-form, structured text, and synthesize multi-source information through deep reasoning. This section analyzes their key features, evaluation datasets, and metrics.", + "bbox": [ + 511, + 500, + 913, + 603 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "At the core of Deep Research tasks lies the mission of addressing complex informational queries. These tasks are distinguished by several key attributes:", + "bbox": [ + 511, + 604, + 913, + 648 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "First, dynamic interactivity is essential. Models engage in iterative dialogue to uncover latent user needs or \"unknown unknowns\". For example, the Co-Storm [43] framework enables collaboration with multiple language model agents to explore information gradually, easing user cognitive load and capturing unmet needs more accurately.", + "bbox": [ + 511, + 648, + 913, + 739 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Second, integrating information from multiple sources is crucial. Models must consolidate diverse data to provide comprehensive coverage. For instance, uses dynamic mind maps to structure knowledge and produce cohesive reports, ensuring accuracy and completeness.", + "bbox": [ + 511, + 739, + 913, + 815 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Third, expert-level accuracy is required. Many tasks demand domain expertise, expecting models to perform like human specialists. 
The Agentic Reasoning [92] framework illustrates this with high-stakes scenarios like medical treatment design or legal analysis, where outputs are judged on correctness, depth, and coherence.", + "bbox": [ + 511, + 816, + 913, + 906 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Synergizing RAG and Reasoning: A Systematic Review", + "bbox": [ + 84, + 59, + 380, + 71 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 651, + 59, + 911, + 71 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/a92e1ff229df312dcac56e5cfb3e551bc2263f588f8e2558c8d7b588227aff0d.jpg", + "table_caption": [ + "Table 2. Tasks and Datasets under the New Trend of RAG Combined with Reasoning" + ], + "table_footnote": [], + "table_body": "
Task TypeSub-TaskDatasetDescriptionScaleConstruction ByEvaluationPaper
Deep ResearchDeep ResearchAgentic ReasoningPhD-level dataset covering finance, medicine, and law.15-30 domainsPhD ExpertsExpert pass rate[92]
Report GenerationWildSeek [44]Info-seeking task-goal pairs for document generation.100 samplesRules/LLM/ManualLLM[98]
Report GenerationTELL ME A STORY [37]Fiction writing evaluation dataset: detailed prompts and long-form narratives.230 samplesManualLLM[98]
Peer ReviewReview-5k [91]ICLR 2024 peer review dataset: paper metadata and structured reviewer feedback.4,991 papersOpenReview/arXivMSE/MAE/Acc[91]
Report GenerationResearch-14k [91]2022-2024 Accepted ML papers: outlines, full texts, and cited abstracts.14,911 papersSemantic Scholar + arXivSimulated review scores[91]
Report GenerationSolutionBench [54]Engineering benchmark: constrained solutions across 8 real-world domains.1,050 datapointsManual/LLM extractionAnalytical/Technical scores[54]
Mathematics & ReasoningMath ReasoningGPQA [67]PhD-level MCQs in physics, chemistry, and biology.744 setsPhD ExpertsAccuracy[92]
Math ReasoningMATH500 [55]500 math problems from the MATH test set.500 problemsPublic reposPass@K[51]
ProgrammingLiveCodeBench [40]Programming benchmark with easy, medium, and hard problems.1,055 problemsCompetition platformsPass@K[51]
ProgrammingUSACO [70]USA Computing Olympiad problems, testing algorithms and coding.307 problemsUSA Computing OlympiadPass@K[52]
Math ReasoningTheoremQA-Math [33]BRIGHT subset: theorem-based math problems.206 problemsSTEM datasetsAccuracy[52]
ProgrammingGorilla [64]API-aware code generation from HuggingFace, Torch Hub, TensorFlow Hub docs.1,600 APIsManualAST matching[73]
Math ReasoningOlympiadBench [29]Olympiad-level math competition problems.1,000 problemsCompetitionsAccuracy/F1[109]
Complex ReasoningComplexWebQA [76]Multi-step reasoning over web queries with cross-document integration.34,689 queriesWeb snippetsAccuracy[36]
Demanding RetrievalDomain RetrievalStackEcon & StackBio [33]Biology and economics StackExchange questions for complex retrieval.206 queriesStackExchangenDCG@K[52]
Active RetrievalAR-Bench [14]Active retrieval benchmark with four sub-tasks.8k/sub-taskSyntheticAccuracy[14]
Real-timeTAQA [104]QA dataset with time-evolving answers.10K-100K rowsHuman-curatedLLM[14]
Real-timeFreshQA [80]Dynamic fact QA benchmark with evolving answers.600 samplesMixed sourcesLLM[14]
Domain RetrievalPubMed [42]PICO-based medical search dataset linking reviews to PubMed.21k+ samplesSystematic reviewsRecall@K[42]
Domain RetrievalTrial search [42]PICO-based clinical trial search linked to ClinicalTrials.gov.7k+ samplesManuallyRecall@K[42]
Domain RetrievalFinSearchBench-24 [50]Financial retrieval benchmark covering stocks, rates, policy, trends.1,500 queriesManuallyAccuracy[50]
Decision & QABusinessDQA [48]Decision QA benchmark with business scenarios in enterprise settings.301 pairsvideo gamesAccuracy[48]
MedicalCMB-Clin [87]CMB subset for clinical diagnosis reasoning in Chinese medical cases.74 casesTextbooks/diagnostic materialsLLM/Expert[11]
MedicalMM-Cases [11]Medicine cases generated by GPT-4o-mini, verified by doctors.609 casesLLM/doctor-reviewedLLM/Expert[11]
MedicalTCM-Cases [11]TCM patient cases generated by GPT-4o-mini, verified by doctors.130 casesLLM/doctor-reviewedLLM/Expert[11]
", + "bbox": [ + 81, + 116, + 915, + 964 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 59, + 346, + 71 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Gao et al.", + "bbox": [ + 857, + 59, + 911, + 71 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Fourth, multi-modal reasoning is often necessary. Deep Research tasks involve varied data types—text, code, knowledge graphs—and dynamic tool use such as web searches or code execution to enhance reasoning.", + "bbox": [ + 86, + 90, + 480, + 150 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Finally, handling multiple real-world constraints is vital. Tasks may require generating practical solutions under specific conditions, like designing hospitals in challenging environments with factors like heavy rainfall and seismic activity, as seen in the DeepSolution framework. This ensures outputs are feasible and relevant.", + "bbox": [ + 86, + 152, + 480, + 239 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "To ensure the diversity and complexity of Deep Research tasks, their evaluation relies on datasets drawn from multiple domains. A few notable examples include:", + "bbox": [ + 86, + 242, + 480, + 286 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "WildSeek Dataset [44]: This dataset is constructed from real-world user information-seeking scenarios and comprises 100 data points covering 24 fields, including economics, computer science, and law. Each data point is characterized by a topic, user goal, and domain label. For example: \"Domain: Economics; Topic: Development of a Shared Trading Currency; Goal: Investigate how a new shared currency could eliminate transaction costs\". 
WildSeek effectively evaluates models' competence in dynamic interaction and multi-source information integration.", + "bbox": [ + 86, + 287, + 480, + 436 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "GAIA [62]. The GAIA Benchmark, developed jointly by Meta AI, Hugging Face, and others, is a comprehensive evaluation framework designed to assess general AI assistants' ability to handle real-world problems. It features 466 carefully crafted tasks spanning language reasoning, visual perception, multi-agent collaboration, and adaptability, focusing on key skills like reasoning, multimodal processing, web browsing, and tool use. GAIA measures performance across dimensions such as task execution, adaptability, collaboration, generalization, and real-world reasoning with metrics like completion rate, response quality, efficiency, and robustness. Unlike traditional benchmarks, it emphasizes robustness and reliability in everyday scenarios, supports zero-shot evaluation, prevents data contamination, and is widely used in research and industry to guide AI development.", + "bbox": [ + 86, + 438, + 480, + 664 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "SolutionBench [54]: This dataset spans eight engineering domains, including environmental, mining, and transportation engineering. Each instance presents a complex engineering problem with specific constraints. 
For example: \"Design a safe and efficient hospital construction plan in a region with 3000mm annual rainfall, expansive soils, and frequent seismic activity.\" SolutionBench evaluates models' ability to address multi-constraint problems and integrate specialized knowledge effectively.", + "bbox": [ + 86, + 665, + 480, + 800 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The current evaluation system for DeepResearch faces the dual challenges of scarce specialized testing tasks and the difficulty of assessing complex, lengthy reports: On one hand, existing benchmark tests only cover basic capabilities and lack systematic evaluation standards in specialized scenarios like business analysis and policy assessment; on the", + "bbox": [ + 86, + 801, + 480, + 890 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "other hand, the multimodal integration, logical chain verification, and domain adaptability testing of long reports pose technical bottlenecks for traditional assessment methods, necessitating the development of new evaluation tools that integrate logic graphs, dynamic scenario simulation, and domain knowledge bases.", + "bbox": [ + 517, + 90, + 911, + 181 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In the future, the evaluation system will evolve into a multidimensional framework, including the construction of a three-level indicator matrix covering basic capabilities, reasoning levels, and application value. Overcoming these evaluation bottlenecks requires both technological innovation and joint standard-building efforts. This concerns not only the reliability validation of intelligent research tools but also the reshaping of research evaluation paradigms and industrial application boundaries.", + "bbox": [ + 517, + 181, + 911, + 316 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "6.2.2 PhD (Expert)-Level Complex Reasoning. 
The integration of RAG with advanced reasoning has become essential for tackling expert-level, complex cognitive tasks, particularly at the PhD level. These tasks, including competitive programming, theorem-driven proof reasoning, and cross-disciplinary knowledge retrieval, require multi-layered logical inference and precise coordination between dynamic retrieval and domain-specific knowledge. PhD-level reasoning differs from standard evaluations across three dimensions: knowledge intensity, procedural rigor, and domain specificity. Knowledge intensity demands dynamic access to deep, specialized knowledge, such as analyzing dynamic programming time complexity or applying algebraic topology theorems—needs that surpass general corpora and call for domain-specific knowledge graphs and retrieval methods. Procedural rigor involves mathematical precision in multistep proofs, requiring logical consistency in symbolic manipulation, theorem use, and counterexample refutation, as seen in international math competitions. Domain specificity reflects tailored reasoning methods, e.g., handling synchronization in concurrent programming or employing tensor calculus in quantum field theory.", + "bbox": [ + 517, + 333, + 911, + 662 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Evaluation systems for such tasks are inherently multilayered and multimodal. The USACO Benchmark [71] offers a graduated difficulty scale for programming reasoning, testing both correctness and algorithmic constraints like time complexity. TheoremQA-Math [9] links formalized math problems to theorem libraries, demanding verifiable mappings between theorem applications and calculations. 
Cross-disciplinary datasets like StackBio and StackEcon [53] assess models' ability to extract critical knowledge from dense, domain-rich documents, serving as strong tests for domain-oriented retrieval accuracy.", + "bbox": [ + 517, + 665, + 911, + 830 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Modern evaluation surpasses traditional end-to-end tests by combining process and outcome validation. Frameworks like CR-Planner [52] use dual models—a Sub-Goal Critic to score reasoning chains and an Execution Critic to evaluate retrieval—allowing fine-grained step monitoring. For", + "bbox": [ + 517, + 830, + 911, + 905 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Synergizing RAG and Reasoning: A Systematic Review", + "bbox": [ + 84, + 60, + 379, + 71 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 653, + 60, + 911, + 71 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "example, in dynamic programming, key steps like formulating state transitions and retrieving boundary conditions receive targeted feedback. Similarly, Search-O1 [51] quantifies knowledge completeness by tracking uncertainty indicators (e.g., tentative language), measuring confidence and accuracy. Outcome validation maintains strict correctness benchmarks in programming and combines metrics like F1 scores with expert review in open-domain scientific QA to ensure precise understanding of domain-specific terms.", + "bbox": [ + 86, + 90, + 480, + 224 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "6.3 Challenges and Future Directions", + "text_level": 1, + "bbox": [ + 86, + 239, + 370, + 253 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "6.3.1 Complex Domain Tasks. Recent advances in RAG have provided novel solutions for more complex tasks in professional domains. 
These downstream tasks transcend the limitations of traditional question-answering models that rely solely on simple retrieval-generation patterns, involving challenges such as real-time information acquisition, integration of domain expertise, and dynamic decision-making support. The nature of these tasks can be characterized along three interrelated dimensions: (1) temporal dynamics, emphasizing the rapid changes in data and reasoning environment; (2) domain specificity, focusing on deep integration of industry knowledge and structured data; and (3) reasoning chain complexity, reflecting requirements for multi-stage reasoning and fine-grained decomposition of queries.", + "bbox": [ + 86, + 258, + 480, + 467 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "To rigorously evaluate such systems, innovative benchmarking approaches have been proposed. The FinSearchBenchmark-24 dataset, for example, encompasses five months of market data variations, integrating multi-variable interactions across stock, policy, and industrial sectors, and includes over 1,500 multiple-choice questions, thereby surpassing the constraints of traditional static benchmarks. The evaluation adopts a hierarchical and quantitative methodology: the foundational level measures model accuracy and response latency; the intermediate layer assesses the temporal sensitivity of information relevance and the contribution of retrieval mechanisms to reasoning outcomes; and the advanced layer employs ablation studies to highlight performance variances under dynamic temporal decay. 
This multifaceted evaluation not only differentiates surface-level retrieval capabilities but also rigorously measures the synergy between reasoning quality and temporal context, furnishing theoretical and practical foundations for long-term stability and predictive accuracy in complex domain systems.", + "bbox": [ + 86, + 469, + 491, + 753 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Experimental findings further reveal that establishing long-term evaluation protocols with temporal weighting functions is indispensable for adapting to realistic dynamic environments. Nonlinear declines in decision accuracy, observed when extending relevance windows from 72 to 168 hours, emphasize the importance of factoring temporal decay into assessment frameworks. Future work should extend these evaluation protocols to high-stakes domains such as medical diagnostics and legal consultation, where the standardization of interpretability metrics will critically support", + "bbox": [ + 86, + 756, + 480, + 905 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "the evolution of RAG+ reasoning systems toward robust and trustworthy decision-assistance platforms.", + "bbox": [ + 517, + 92, + 910, + 119 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "6.3.2 Decision Support and Active Retrieval. The expansion of RAG+Reasoning frameworks into specialized tasks has fostered two complementary research paradigms: decision optimization and active retrieval. In the decision optimization category, systems must leverage heterogeneous structured data, rule bases, and objective functions to formulate optimal strategies. Representative systems like PlanRAG formalize Decision Question Answering (Decision QA) tasks targeting enterprise-level scenarios including supply chain optimization, industrial resource allocation, and market price regulation. 
These tasks require planning multimodal reasoning paths where models iteratively retrieve data from relational and graph databases, integrate intricate business rules, and iteratively refine decision-making paths through replanning mechanisms. To evaluate such capabilities, the Decision QA (DQA) benchmark creates dual database versions (MySQL and Neo4j) derived from economic systems in strategy games, assessing cross-structured generalization. The evaluation consists of a three-tier framework: the core tier measures answer accuracy; the intermediate layer diagnoses error types to identify system bottlenecks; and the foundational tier focuses on retrieval efficiency and the impact of replanning frequency. This structured evaluation framework not only tracks performance but also offers actionable insights for system refinement.", + "bbox": [ + 517, + 132, + 911, + 507 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Conversely, the active retrieval evaluation addresses the challenge of dynamically determining when and how to invoke retrieval under complex multimodal contexts. Unlike rigid traditional RAG systems, UAR applies lightweight classifiers for fast, accurate triggers, improving performance in time-sensitive or creative tasks. Tested on AR-Bench, it combines binary trigger accuracy with GPT assessments, exact matches, and human reviews, boosting adaptability across diverse contexts.", + "bbox": [ + 517, + 508, + 911, + 643 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Emerging trends in these evaluation paradigms indicate a shift from static, rule-based frameworks to dynamic system simulations, as exemplified by DQA's use of game engine-generated datasets to simulate realistic environments. Similarly, active retrieval tasks progress from simple retrieval trigger decisions toward collaborative multi-criteria decision-making. 
Evaluation methodologies are concurrently evolving from singular performance metrics to multidimensional matrices comprising core effectiveness, diagnostic error distributions, and economic cost measures.", + "bbox": [ + 517, + 645, + 911, + 792 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "7 Cost and Risk", + "text_level": 1, + "bbox": [ + 517, + 811, + 669, + 825 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Integrating reasoning into RAG systems is neither effortless nor purely beneficial. Recent trends have exaggerated its advantages while downplaying the costs and risks. This trade-off between performance and cost is crucial. This section examines the expenses and misuse risks linked to adding", + "bbox": [ + 517, + 832, + 911, + 905 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 60, + 344, + 71 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Gao et al.", + "bbox": [ + 857, + 60, + 911, + 70 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/5456685368ffe44fb4c5b81029bd5ef81d13e2a9f1ac24b37bf34bf87ce8844d.jpg", + "image_caption": [ + "Figure 8. From LLM to RAG and then to RAG+Reasoning, performance improvement comes with additional cost." + ], + "image_footnote": [], + "bbox": [ + 86, + 88, + 911, + 335 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "reasoning to RAG systems. As shown in Figure 8, the cost of moving from LLM to RAG, then to RAG + Reasoning, incurs an inevitable \"invisible tax\". Though often hidden by performance gains, this cost is vital in assessing these methods' overall practicality and efficiency.", + "bbox": [ + 81, + 383, + 482, + 458 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "The shift from LLM to RAG moves from simplicity to enhanced knowledge handling by incorporating external information. 
A basic LLM provides direct, efficient answers with low latency and token use but is limited to pre-trained knowledge, restricting complex or up-to-date queries. RAG overcomes this by adding a vector database for external retrieval, vastly expanding response scope and reliability. However, this requires substantial data processing, storage, and introduces higher latency and token costs due to data chunking, encoding, indexing, and retrieval overhead.", + "bbox": [ + 81, + 458, + 482, + 609 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Advancing from RAG to RAG + Reasoning adds multistep reasoning capabilities, enabling complex task handling, autonomous decisions, and more context-aware responses through intricate reasoning. This comes at the expense of increased delays, token consumption, processing demands, and greater complexity in system integration and maintenance. The reasoning layer's autonomy also brings opaqueness, unpredictability, and heightened security and reliability risks. These challenges highlight the necessity of carefully balancing effectiveness against costs when adopting RAG + Reasoning in real-world applications.", + "bbox": [ + 81, + 609, + 482, + 777 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "7.1 Cost Trade-off in RAG+Reasoning", + "text_level": 1, + "bbox": [ + 83, + 797, + 377, + 811 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Figure 9 illustrates typical works combining RAG and Reasoning, showing retrieval and reasoning demands alongside token consumption. While integrating dynamic knowledge retrieval with multi-step reasoning greatly improves accuracy in more complex tasks, the resulting systemic costs are often underestimated in research and practice. These costs", + "bbox": [ + 81, + 816, + 482, + 906 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "grow non-linearly, causing serious efficiency bottlenecks in real-world use. 
The tradeoff between effectiveness and efficiency stems from RAG+Reasoning's architecture: multistage task decoupling, dynamic path planning, and intermediate state preservation. These features improve reasoning quality but trigger cascading increases in computational resources, token usage, and reduced retrieval efficiency. This section explores these implicit tradeoffs from the angles of resource use, token consumption, and retrieval efficiency.", + "bbox": [ + 511, + 383, + 915, + 518 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "7.1.1 Non-Linear Growth of Computational Resources. The RAG+Reasoning framework separates retrieval and reasoning into multiple stages, causing computational demands to grow non-linearly. Dynamic chain-of-reasoning methods execute multiple LLM generations and retrievals per inference, resulting in complexity far exceeding baseline models. Fixed-length reasoning chains trigger repeated retrieval and generation calls, increasing resource needs with task complexity. More advanced techniques like MCTS-guided methods add rounds of candidate path generation and evaluation, further multiplying runtime and memory usage on GPUs compared to linear methods. Even simpler multi-step planning tasks incur much higher overhead than single-stage retrieval models due to extra graph construction and analysis. While this resource intensity improves inference accuracy, it poses serious scalability challenges under limited resources as computational costs grow superlinearly with model size, retrieval chain length, and task complexity.", + "7.1.2 Implicit Token Inflation. Multi-step reasoning frameworks inherently cause significant token inflation through iterative intermediate processes like thought chains, retrieved documents, and verification feedback. 
Active learning setups consolidate multiple intermediate results—retrieved documents, counterfactuals, multi-round validations—leading to" + ], + "bbox": [ + 511, + 531, + 929, + 906 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Synergizing RAG and Reasoning: A Systematic Review", + "bbox": [ + 84, + 59, + 380, + 71 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 651, + 59, + 913, + 71 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/43b9c3e6021b5e7e0d22dfa723c29a2ecc7d344929cf0de432c2f1383653b5ca.jpg", + "image_caption": [ + "Figure 9. Cost quadrant diagram of retrieval and reasoning requirements" + ], + "image_footnote": [], + "bbox": [ + 84, + 88, + 915, + 479 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "token usage well beyond typical limits. Chain-based retrieval also generates token bloat due to exhaustive candidate path exploration. Iterative reasoning path selection, expansion, and evaluation add heavy token overhead in tasks needing deep reasoning chains involving extensive sequence generation and evaluation. Token usage grows exponentially with task complexity and increases further when intermediate reasoning favors depth or breadth. This inflation raises API costs and memory demands, especially in long-text generation like Deep Research [106].", + "bbox": [ + 81, + 527, + 483, + 680 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "7.1.3 Marginal Decline in Retrieval Efficiency. Dynamic retrieval improves knowledge precision but suffers diminishing efficiency as task complexity increases. Adaptive methods reduce retrievals for simple tasks but still require multiple iterations for complex ones, adding significant overhead compared to standard RAG. The tradeoff between retrieval quality and frequency further limits efficiency. 
High-accuracy retrieval methods incur heavy computational and time costs, negating their efficiency benefits. Even advanced retrieval-trigger optimizations can't fully remove this overhead due to extra training and deployment costs [41]. This natural efficiency ceiling highlights ongoing challenges in balancing retrieval accuracy and resource use, especially in large, complex tasks.", + "bbox": [ + 81, + 695, + 486, + 906 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "7.1.4 Toward a Cost Model Framework. Against this backdrop, the development of fine-grained cost models becomes a necessary precondition for balancing effectiveness and efficiency. Existing evaluation metrics, which often rely on single-task performance indicators (such as Exact Match or F1) or coarse-grained runtime statistics, lack the comprehensiveness to jointly model computational resources, token flow, and retrieval overhead. Consequently, they fail to quantify the true tradeoffs in reasoning mechanisms. For instance, while multi-hop reasoning may improve task accuracy, these improvements are frequently offset by exponential growth in token consumption and latency relative to baseline methods. A fine-grained cost model would enable researchers and practitioners to more accurately evaluate the real benefits of reasoning-centric frameworks while addressing the underexplored interplay between computational cost and task performance.", + "bbox": [ + 511, + 527, + 915, + 785 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "7.2 Potential Risk of Over-Thinking", + "text_level": 1, + "bbox": [ + 513, + 797, + 797, + 811 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "In the process of developing deep thinking models, \"overthinking\" poses a key risk to system efficiency and reliability [10, 15, 19, 30, 74, 81], and this issue is further amplified after combining with RAG. 
It appears as redundant reasoning steps, excessive validation of known conclusions, or unnecessarily broad retrieval scopes, wasting computational", + "bbox": [ + 511, + 815, + 915, + 906 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 59, + 346, + 71 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Gao et al.", + "bbox": [ + 857, + 59, + 911, + 70 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "resources, increasing error propagation, and degrading performance. For example, in financial risk assessment, an LLM with RAG might retrieve multiple similar market reports and repeatedly verify the same economic indicators rather than focusing on core risks, leading to delayed decisions. This stems from an imbalance between reasoning and retrieval: after accessing external knowledge, the model can enter a \"self-validation loop,\" repeatedly parsing overlapping or contradictory documents. The generation module, seeking reliability, may trigger further retrievals, creating a feedback loop that worsens inefficiency. This issue is critical in real-time systems like medical diagnosis, where over-retrieval of irrelevant literature can delay urgent decisions.", + "bbox": [ + 86, + 90, + 480, + 286 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Case studies show the impact of overthinking [74]. In legal document interpretation, early reasoning errors can amplify through the retrieval-generation loop, causing retrieval along incorrect paths and yielding illogical conclusions. This error propagation is evident in systems like the Search-o1 [51], where flawed information extraction misguides subsequent reasoning. In industrial equipment manual interpretation, overextended reasoning with highly similar documents risks obscuring critical parameter differences, increasing procedural errors. 
These examples illustrate that overthinking not only hampers knowledge integration but also creates safety hazards in practical applications.", + "bbox": [ + 86, + 287, + 480, + 468 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "To mitigate these risks, researchers propose multiple optimization frameworks. ReaRAG [49] limits reasoning chain length and incorporates self-reflection to prune invalid branches. A simple and effective way is to use a two-stage filtering process, first narrowing documents by metadata, then validating fragment relevance, reducing redundant information—for instance, retrieving only relevant legal clauses rather than entire regulatory texts. The DeepSeek R1 [26] applies reinforcement learning with distillation to penalize redundant steps, cutting repeated formula validation in math proofs by over $40\\%$ . These approaches transform open-ended reasoning into controlled, goal-directed processes, using methods like attention weight analysis to measure information gain or confidence functions to evaluate reasoning paths.", + "bbox": [ + 86, + 469, + 500, + 679 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Current research balances constraints with model creativity. Knowledge graph-guided reasoning is tested in clinical trials to prioritize key medical features over exhaustive literature retrieval [11]. Causal reasoning models aim to break error chains; for example, in financial forecasting, causal graphs restrict reasoning to logically relevant macroeconomic links. Adaptive stopping strategies adjust reasoning depth in customer service—simple queries use preset templates, complex issues activate multi-hop reasoning. 
These advances reshape retrieval-augmented reasoning, with the core challenge being to develop evaluation frameworks that avoid both \"cognitive stagnation\" from excessive constraints and \"cognitive overload\" from insufficient control.", + "bbox": [ + 86, + 680, + 480, + 875 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Future progress will integrate cognitive science with computational modeling. By mimicking human \"intuition-verification\"", + "bbox": [ + 86, + 876, + 511, + 906 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "decision-making, LLMs could switch seamlessly between rapid response and deep reasoning. In high-risk fields like industrial fault diagnosis, such hybrid models can quickly propose contingency plans after initial retrieval while verifying their validity through deeper analysis. This layered approach reduces overthinking risks and offers a safe, controllable path for applying LLMs in critical industries.", + "bbox": [ + 517, + 90, + 911, + 196 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "8 Practical Guide", + "text_level": 1, + "bbox": [ + 517, + 210, + 683, + 226 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "The combination of RAG and Reasoning is not a one-size-fits-all solution; it requires careful evaluation of each scenario's unique needs. As a rapidly evolving and relatively new field, practical applications are still limited, making best practices hard to define. This chapter abstracts and summarizes the key traits of typical RAG+Reasoning application domains and offers practical guidelines for system design based on these features. It provides recommendations on leveraging RAG's strengths with Reasoning, highlighting priorities, pitfalls to avoid, and current opportunities (Figure 10). 
The goal is to promote wider adoption and effective use of this technology in diverse, complex real-world settings.", + "bbox": [ + 517, + 231, + 911, + 411 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "8.1 Domain characteristics", + "text_level": 1, + "bbox": [ + 517, + 426, + 725, + 439 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "As illustrated in the left part of Figure 10, we develop a seven-dimensional feature system based on the three core stages of RAG—query, retrieval, and generation—to systematically analyze challenges and adaptation needs across various industries. The query stage emphasizes the complexity of intent understanding and the demand for advanced reasoning, recognizing that industries differ in query abstraction and specificity; some require quickly capturing implicit, deep intentions, while others need complex reasoning. Effective preservation of original semantic meaning during understanding and reasoning is key to improving RAG performance. Retrieval focuses on the system's adaptability to diverse and dynamic knowledge sources, which vary from rich multi-domain data to rapidly updating information; frequent updates and fragmented knowledge present challenges that demand effective integration to ensure consistent support for generation. The generation stage requires high-quality outputs, with strict control over hallucinations—especially critical in sensitive fields like healthcare and law—along with varying latency requirements for real-time or delayed responses. Explainability and traceability at this stage are essential for system credibility and serve as key evaluation metrics. This comprehensive framework reveals technical bottlenecks and guides improvements, and is applied to analyze four representative domains: finance, healthcare, law, and personal assistants.", + "bbox": [ + 517, + 444, + 911, + 835 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "8.1.1 Finance. 
In the finance domain, user queries typically focus on structured needs like investment decisions and risk forecasting. While intent understanding is moderately complex, the system must perform advanced reasoning amid", + "bbox": [ + 517, + 845, + 911, + 906 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Synergizing RAG and Reasoning: A Systematic Review", + "bbox": [ + 84, + 60, + 379, + 71 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 653, + 60, + 911, + 71 + ], + "page_idx": 24 + }, + { + "type": "image", + "img_path": "images/7ac1eba9b0f22ec452c189570cac365dfa05f5cba364f27f64c05775b1c82bff.jpg", + "image_caption": [ + "Figure 10. Practical guide to synergizing RAG and Reasoning" + ], + "image_footnote": [], + "bbox": [ + 84, + 87, + 911, + 345 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "rapidly changing market conditions, relying heavily on external knowledge and frequent updates. For example, portfolio return forecasting integrates time series analysis, policy interpretation, and cross-market reasoning. Retrieval demands handling diverse data sources—real-time market data, annual reports, and regulatory filings—with update cycles often measured in minutes. During generation, strict latency and hallucination control are crucial, as outputs must include decision-making suggestions with full data traceability. Investment research reports, for instance, require annotated key indicators, their data sources, and computation logic to ensure transparency and regulatory compliance. High latency control and robust traceability are essential to maintain transparency and adherence to financial regulations.", + "bbox": [ + 81, + 396, + 483, + 608 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "8.1.2 Healthcare. Healthcare queries involve complex medical semantic parsing, often with ambiguous terms or incomplete symptoms. 
For example, \"persistent chest pain with shortness of breath\" requires multi-hop reasoning across cardiology, pulmonology, and emergency medicine. Retrieval must integrate electronic health records, medical imaging, and up-to-date clinical guidelines. In generation, hallucination tolerance is minimal—errors in drug dosages or protocols risk malpractice. Therefore, accuracy, timeliness, and explainability are paramount, with every decision step traceable and verifiable.", + "8.1.3 Legal Services. Legal consultations often require interpreting statutes and citing cases, balancing precise legal terms with natural language nuances. Retrieval depends on structured, infrequently updated sources like case law databases and local regulations. Generation demands accuracy—for instance, drafting contract clauses must precisely cite specific statutes (e.g., Article 472 of the Civil Code) down" + ], + "bbox": [ + 81, + 619, + 488, + 906 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "to the paragraph level for traceability. Explainability is essential, with traceability usually above $95\\%$ , and probabilistic language avoided to comply with strict judicial documentation standards.", + "bbox": [ + 511, + 395, + 915, + 455 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "8.1.4 Personal Assistants. This domain features diverse, dynamic user needs, including schedule management, real-time navigation, and open-domain conversations. Accurate intent disambiguation through contextual awareness is crucial. Retrieval integrates fragmented sources like user behavior logs, geolocation, and social media. Generation latency varies: weather updates require sub-second responses, while travel planning can tolerate $5+$ seconds. Hallucination tolerance depends on context—creative outputs are acceptable for recipes but not for flight information, which demands full accuracy. This necessitates adaptive verification in the RAG system. 
Though intent complexity is lower than in healthcare or legal fields, the domain's interaction diversity requires heavy reliance on external knowledge and dynamic balancing of latency and accuracy.", + "bbox": [ + 511, + 473, + 915, + 700 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "8.2 Do's and Don'ts", + "text_level": 1, + "bbox": [ + 513, + 720, + 671, + 734 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Building on aforementioned domain characteristics, we further identify six common scenarios, and derive technical adaptation principles for each. This section outlines key optimization strategies (Do's) and prohibitions (Don'ts), to guide the co-design of RAG and reasoning.", + "8.2.1 Structured Reasoning Scenarios. For scenarios requiring multi-step logical decomposition and structured knowledge dependency, such as portfolio return prediction, Chain-of-Thought (CoT) task decomposition and knowledge graph (KG)-driven graph reasoning approaches should be" + ], + "bbox": [ + 511, + 738, + 915, + 907 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 59, + 346, + 71 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Gao et al.", + "bbox": [ + 857, + 59, + 911, + 71 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "employed. Complex problems should be broken into verifiable sub-tasks, such as coupling market trend analysis with policy impact assessment, while leveraging knowledge graph constraints to ensure logical completeness and auditability. It is essential to incorporate a temporal validation layer to cross-check the consistency of timestamp-sensitive information (e.g., real-time market data or emergent regulatory policies) within a dynamic knowledge base. 
Approaches that exclude retrieval-based verification of salient features must be avoided, as they may lead to reasoning biases arising from the absence of structured knowledge anchors (e.g., critical indicators from financial statements). Furthermore, the reasoning space of LLMs should be constrained within domain-specific knowledge frameworks to prevent irrelevant or invalid deductions.", + "bbox": [ + 86, + 90, + 480, + 316 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "8.2.2 Dynamic Demand-Responsive Scenarios. For scenarios characterized by rapidly shifting demands and user preference variability, such as itinerary planning and multimodal interaction in personal assistant services, a dynamic adaptation mechanism based on prompt engineering is recommended. By dynamically associating fragmented knowledge units (e.g., user behavior history and real-time traffic updates) with semantic templates and employing heuristic rules for search-space pruning (e.g., prioritizing locally updated information within the past 24 hours), the system can balance contextual adaptability with response speed. Model fine-tuning or reinforcement learning (RLHF/DPO)-based strategy updates should be avoided due to their lengthy iterative cycles and computational overhead, which cannot meet real-time responsiveness requirements, such as millisecond-grade reaction times for last-minute destination changes. Lightweight caching architectures should be implemented within the retrieval system, prioritizing frequently accessed knowledge fragments, such as operating hours of popular tourist attractions, to achieve an equilibrium between dynamism and stability.", + "bbox": [ + 86, + 325, + 482, + 641 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "8.2.3 Deterministic Decision-Making Scenarios. 
In scenarios requiring a single, reliable conclusion, such as clinical diagnosis generation in the healthcare domain, a multi-level deterministic assurance system should be established. Time-validation layers can filter outdated knowledge (e.g., therapies no longer approved), while field-sensitive retrieval modules trigger predefined decision rules conforming to up-to-date clinical guidelines (e.g., those codified within the latest version of the International Classification of Diseases [ICD]). Knowledge graph path constraints should restrict the reasoning process to validated causal links within medical logic (e.g., linking symptom patterns to laboratory test results within corroborated diagnostic pathways), thereby minimizing the likelihood of deviations from standard protocols. Probabilistic exploration strategies that generate alternative hypotheses (e.g., speculative differential diagnoses for atypical pneumonia) should be strictly disallowed to avoid clinical", + "bbox": [ + 86, + 650, + 482, + 905 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "misjudgments. Additionally, delegating decision-making authority to external classification models must be avoided to maintain end-to-end explainability and a clear causal link in the decision-making pipeline.", + "bbox": [ + 517, + 92, + 911, + 151 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "8.2.4 Time-Sensitive Scenarios. In tasks highly sensitive to response delays, such as real-time risk warnings and trading decisions in the financial sector, heuristic rules should be employed to prioritize indexing of frequently queried knowledge units (e.g., volatility indices and liquidity indicators) at the top of the search hierarchy. Directed retrieval expansion strategies that preload potentially associated information (e.g., contractual clauses of derivative instruments tied to underlying assets) can further reduce latency in multi-turn interactions. 
Monte Carlo Tree Search (MCTS) and other sample-based algorithms are ill-suited for such scenarios due to the excessive computational complexity caused by branch expansion, rendering them infeasible within tight time constraints (e.g., milliseconds). Similarly, the invocation of complex mathematical solvers (e.g., numerical solutions for stochastic differential equations) can introduce uncontrollable delays and should be replaced with lightweight rule-based mechanisms (e.g., threshold-triggering mechanisms based on historical volatility ranges).", + "bbox": [ + 517, + 161, + 911, + 446 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "8.2.5 Risk-Sensitive Scenarios. For scenarios with minimal tolerance for errors, such as contract clause generation and citation of judicial interpretations in the legal sector, a dual-layer defensive mechanism must be employed. A pre-action review layer should validate the compliance of generated content with statutory standards (e.g., ensuring consistency between liability clauses and Article 577 of the Civil Code), while a reliability validation layer performs cross-referencing validation across multiple sources (e.g., aligning Supreme Court precedents with regional court guidelines) to resolve potential conflicts. Retrieval systems must include version control modules to track and update legal references (e.g., automatically flagging repealed local statutes). Unconstrained reinforcement learning-based text generation methods must be avoided, as their exploratory nature risks violating the normative requirements of legal documents (e.g., generating presumptive liability terms unsupported by judicial interpretations). 
All decision-making actions must pass through deterministic rule engines to filter inadmissible outputs, and the system should never execute decision actions autonomously, such as generating legally binding arbitration notices without oversight.", + "bbox": [ + 517, + 459, + 911, + 789 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "8.2.6 Complex Path Exploration Scenarios. In exploration tasks involving multiple possible trajectories, such as differential diagnosis and therapeutic pathway optimization in medicine, weighted ranking search algorithms should balance search depth and breadth. Knowledge graph topology can guide prioritization (e.g., standard treatment procedures for acute coronary syndrome), while Monte Carlo Tree", + "bbox": [ + 517, + 801, + 911, + 905 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "Synergizing RAG and Reasoning: A Systematic Review", + "bbox": [ + 84, + 60, + 380, + 71 + ], + "page_idx": 26 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 653, + 60, + 911, + 71 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Search can extend exploration into uncommon differential paths (e.g., rare genetic metabolic disorders). Dynamic pruning threshold functions should be designed (e.g., adjusting the scope of differential diagnosis based on patient history) to eliminate low-confidence hypotheses in real time, thereby controlling computational scale. Brute-force searching of all potential paths (e.g., concurrently testing hundreds of pathogens for nonspecific symptoms) should be avoided to prevent exponential computational scaling. 
Careful handling of specific token triggers during retrieval (e.g., avoiding spurious associations between \"fever\" and unrelated oncological hyperthermia research) is critical to maintaining logical coherence in diagnostic reasoning.", + "bbox": [ + 81, + 90, + 482, + 287 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "8.3 Opportunity Points", + "text_level": 1, + "bbox": [ + 84, + 305, + 269, + 320 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Based on the Do's and Don'ts of current technologies analyzed in the previous section, there remain numerous directions with substantial academic value and application potential that have yet to be fully explored. This section systematically discusses several promising opportunity points across three dimensions: data and indexing, models and methodologies, and application services.", + "bbox": [ + 81, + 321, + 482, + 429 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "8.3.1 Data and Indexing.", + "text_level": 1, + "bbox": [ + 84, + 443, + 276, + 457 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Cold-Hot Tiered Indexing and Dynamic Context Management. The challenge of managing massive and highly heterogeneous data resources lies in devising an effective cold-hot tiered indexing mechanism that prioritizes data according to their frequency of use and importance. Such a mechanism not only demands classification of data based on timeliness and access frequency but also requires integration with dynamic context management. This allows the system to intelligently retrieve the most relevant data according to the immediate context.", + "bbox": [ + 81, + 470, + 482, + 619 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Moreover, a dynamically updated indexing mechanism can mitigate the loss of data timeliness, which often leads to deteriorated inference accuracy. 
By ensuring access to the most recent and task-appropriate data, this approach reduces redundancy and incorrect retrievals associated with static indexing. When combined with automated task scheduling and resource allocation strategies, fine-grained real-time inference support can be achieved, significantly enhancing the system's overall efficiency.", + "bbox": [ + 81, + 621, + 482, + 758 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Cross-Institution Knowledge Base Construction. The construction of cross-institution or cross-domain knowledge bases offers new opportunities for advancing RAG+Reasoning research. At the core of large-scale cross-institutional knowledge bases lies the optimization of data integration and sharing mechanisms. This entails addressing challenges such as data security and privacy while adopting standardized data interfaces or leveraging federated learning paradigms to enable multidimensional data integration.", + "bbox": [ + 81, + 770, + 482, + 906 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Through semantic alignment across multiple sources, entity resolution, and concept abstraction, cross-institutional knowledge can be transformed into authoritative and richly contextualized knowledge bases. These enhanced repositories provide robust contextual support for reasoning tasks and can deliver deeper insights in areas such as healthcare, finance, and urban management.", + "bbox": [ + 511, + 90, + 913, + 196 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Fine-Grained Layering and Confidence Grading. In scenarios where retrieval and reasoning operate synchronously, the interpretability and reliability of generated outcomes are paramount. Fine-grained layering of data and indices, along with confidence grading of retrieval results, enables the system to selectively use the most trustworthy and relevant subsets of data during different stages of reasoning. 
This approach fosters transparency and traceability in final decisions or generative outputs.", + "bbox": [ + 511, + 208, + 926, + 344 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "For instance, in medical diagnosis scenarios, confidence grading can initiate additional verification or expert review in high-risk cases. In the legal domain, confidence layering systematically presents key evidence and identifies sources of uncertainty, reducing reasoning vulnerabilities and minimizing the risk of erroneous conclusions caused by information ambiguity.", + "bbox": [ + 511, + 345, + 913, + 450 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "8.3.2 Models and Methodologies.", + "text_level": 1, + "bbox": [ + 513, + 460, + 769, + 476 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Event-Driven Active Retrieval. Traditional retrieval mechanisms are predominantly passive. However, event-driven active retrieval presents a promising exploration avenue. By monitoring critical events, such as the injection of new data, user interactions, or changes in external sensors, event-triggered retrieval and reasoning processes can be initiated to capture and respond to potential risks and opportunities in real time. Integrating methodologies such as sequence-based event detection or multitask-learning-based intent recognition can facilitate automatic determination of when and how to trigger retrieval actions. Iteratively optimizing these processes contributes to a more efficient and continuous reasoning loop.", + "bbox": [ + 511, + 487, + 921, + 684 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Spatiotemporal-Aware Retrieval and Association. Many applications, such as natural disaster monitoring, traffic flow prediction, and inventory management in retail, exhibit strong dependencies on temporal and spatial dimensions. 
By incorporating spatiotemporal-aware algorithms, retrieval processes can prioritize or emphasize crucial documents according to constraints tied to time and space. This not only enhances timeliness but also improves the purposefulness and accuracy of reasoning.", + "bbox": [ + 511, + 695, + 919, + 830 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Furthermore, modeling the evolution of events within spatiotemporal dimensions—when combined with semantic indexing and vector-based retrieval mechanisms in RAG—can enable more precise characterization and utilization of complex spatiotemporal dynamics during reasoning.", + "bbox": [ + 511, + 830, + 913, + 907 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 59, + 346, + 71 + ], + "page_idx": 27 + }, + { + "type": "header", + "text": "Gao et al.", + "bbox": [ + 857, + 59, + 911, + 70 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Multimodal Fusion in Retrieval and Reasoning. Multimodal data (e.g., text, images, audio, video, and sensor data) collectively constitute a richer contextual environment, offering critical cues for reasoning tasks. However, existing studies are often limited to the retrieval of single or a few data modalities. Advancing research on multimodal fusion and reasoning mechanisms under the RAG+Reasoning framework has the potential to greatly enhance the system's capacity for addressing complex queries.", + "bbox": [ + 86, + 90, + 480, + 226 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "The research focus lies in constructing cross-modal representation learning and alignment methods, enabling unified representations of the same entities or events across different modalities. During retrieval, confidence scores for each modality can be integrated into a comprehensive ranking process, culminating in multimodal-informed joint decision-making during reasoning. 
This approach not only improves contextual understanding in complex tasks but also broadens the application scope of RAG technologies in scenarios such as expert systems and autonomous driving, where sensory integration and interpretation are critical.", + "bbox": [ + 86, + 227, + 480, + 392 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Dynamic Risk Propagation Modeling and Management. The tight coupling of retrieval and reasoning with multi-stage decision-making inevitably introduces risk propagation issues. Misjudgments of high-risk or low-confidence documents during upstream retrieval are often inherited by downstream reasoning processes, amplifying uncertainties and increasing error margins. To address this, dynamic risk modeling should be embedded within retrieval workflows, enabling risk quantification, tracking, and management at multiple stages. When necessary, risk mitigation mechanisms or process rollbacks can be triggered, creating a closed-loop correction framework.", + "bbox": [ + 86, + 409, + 480, + 588 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Incorporating strategies for analyzing and managing risk propagation is not only a technical challenge but also a matter of system deployment and standardization. In high-stakes domains such as healthcare and financial risk management, establishing comprehensive safety standards and compliance protocols will be crucial. These protocols should treat dynamic risk propagation management as a critical component of evaluating and iterating knowledge retrieval and reasoning systems.", + "bbox": [ + 86, + 590, + 480, + 724 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "8.3.3 Application Services.", + "text_level": 1, + "bbox": [ + 86, + 742, + 290, + 755 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Validation of Logical Chain Completeness. 
While RAG with Reasoning can provide partially interpretable reasoning outputs, verifying the completeness of logical chains remains a challenge. Future research could integrate formal verification or symbolic reasoning techniques to ensure consistency and completeness across key reasoning nodes and intermediate conclusions. This would prevent logical gaps or illogical leaps in reasoning, offering robust regulatory support for high-stakes industries such as law and finance.", + "bbox": [ + 86, + 771, + 480, + 905 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Intervenable Generation During Reasoning. Contemporary Agentic RAG often operate as \"black boxes,\" rendering external interventions nearly impossible during generative reasoning tasks. However, providing mechanisms for human intervention—such as through visualization or interactive interfaces—could enable experts or users to perform manual corrections, initialize prior knowledge, or modify interim assumptions during the reasoning process. This would substantially enhance the system's flexibility and safety.", + "bbox": [ + 517, + 90, + 911, + 226 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Specifically, intervenable generation allows not only post hoc error corrections but also proactive identification and rectification of potential risks or biases at earlier stages. Interactive interpretable reasoning platforms or visualization tools grounded in knowledge graphs could empower users to scrutinize and influence reasoning workflows, thereby enhancing confidence and control in decision-making processes across diverse domains.", + "bbox": [ + 517, + 227, + 911, + 345 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Risk Decision Interception Firewalls. In closed-loop automated tasks such as algorithmic trading or medical diagnostic decision-making, erroneous reasoning outputs can lead to catastrophic outcomes. 
To mitigate such risks, the system architecture should incorporate risk decision interception firewalls, which perform multidimensional validations at critical reasoning nodes or prior to outputting decisions. When confidence levels or high-risk indicators breach thresholds, these firewalls can block decision outputs or escalate them for stricter human review.", + "bbox": [ + 517, + 356, + 911, + 503 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "This mechanism serves as a \"final line of defense\" for RAG+Reasoning systems, ensuring decision security in large-scale automated information networks. It also provides a robust foundation for compliance and regulatory auditing, enabling safer deployment in critical applications.", + "bbox": [ + 517, + 506, + 911, + 580 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Edge-Cloud Collaborative Retrieval and Reasoning. With the rapid development of IoT and 5G technologies, many scenarios demand on-site data collection and preliminary processing on edge devices, followed by high-level retrieval and reasoning tasks on cloud platforms. Efficiently partitioning tasks, allocating resources, and maintaining consistency between indexes and models across the edge-cloud continuum represent critical research directions.", + "bbox": [ + 517, + 590, + 911, + 709 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "Leveraging techniques such as lightweight model compression, distributed index synchronization, and communication optimization can ensure fast reasoning while maximizing resource utilization. 
Edge-cloud collaborative solutions are particularly impactful for real-time industrial monitoring and smart city applications, reducing network latency and bandwidth bottlenecks while ensuring accurate and timely inference outputs.", + "bbox": [ + 517, + 710, + 911, + 830 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "In summary, RAG+Reasoning systems present many untapped opportunities across various dimensions. Further research and practical validation could greatly improve their use in complex, high-risk scenarios while fueling new growth in GenAI.", + "bbox": [ + 517, + 832, + 911, + 904 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "Synergizing RAG and Reasoning: A Systematic Review", + "bbox": [ + 84, + 59, + 380, + 71 + ], + "page_idx": 28 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 653, + 59, + 911, + 71 + ], + "page_idx": 28 + }, + { + "type": "text", + "text": "9 Future Trends", + "text_level": 1, + "bbox": [ + 84, + 90, + 241, + 104 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "In this chapter, we summarize four major trends in technological advancements based on current research, aiming to elucidate and guide the potential future directions of RAG.", + "bbox": [ + 84, + 109, + 480, + 155 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "9.1 The Integration of RAG and Graph", + "text_level": 1, + "bbox": [ + 84, + 178, + 380, + 193 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Recent developments have witnessed a growing synergy between RAG systems and graph-based approaches. 
The intrinsic benefits of graph structures, such as explicit logical relationships and knowledge indexing, have enabled new paradigms for addressing challenges in global reasoning, dynamic data management, and personalized services within RAG systems.", + "bbox": [ + 84, + 196, + 480, + 301 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Knowledge Organization.", + "text_level": 1, + "bbox": [ + 102, + 303, + 290, + 316 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Graph-structured knowledge organization frameworks offer a powerful alternative to traditional vector-based retrieval methods, excelling in modeling complex relationships and supporting global reasoning. For example, GraphRAG [18] combines hierarchical graph indexing with community detection to extract entity relationship networks from text corpora, enabling large-scale thematic analysis through hierarchical summaries. Building on this, PIKE [82] introduces a multi-level heterogeneous knowledge graph that organizes documents, semantic segments, and refined knowledge units into a three-layer hierarchy, improving extraction accuracy and multi-hop reasoning via atomized knowledge construction and task decomposition. For dynamic personalization, EMG-RAG [89] features a three-layer Editable Memory Graph architecture that structures memory data by ontology classification, subclass, and entity relationships, using reinforcement learning to enable real-time updates and multidimensional queries. Together, these advances leverage graph topologies to address the limitations of conventional RAG systems—such as one-dimensional representation and weak contextual links—enabling multilevel reasoning from local fact retrieval to global thematic summarization and forming a foundation for interpretable, adaptive RAG systems.", + "bbox": [ + 86, + 318, + 482, + 664 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Symbolic Reasoning. 
Graph-structured symbolic reasoning methods leverage the multi-hop reasoning power of Knowledge Graphs (KG) to better manage complex semantic and logical relationships. Frameworks like HippoRAG2 and the Think-on-Graph (ToG) [60] series exemplify this. HippoRAG2 [28] builds open knowledge graphs and uses personalized PageRank with a dense-sparse coding approach inspired by brain memory, boosting performance in factual memory, semantic understanding, and multi-hop reasoning. Likewise, ToG-2 combines iterative retrieval of knowledge graphs and documents, using relationship discovery, entity pruning, and context-driven graph searches to integrate fine-grained information from unstructured text, enhancing implicit relationship detection.", + "bbox": [ + 86, + 665, + 482, + 875 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Task Planning. Graph-based task planning in RAG systems enhances complex problem-solving by overcoming the", + "bbox": [ + 84, + 876, + 480, + 906 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "limitations of traditional linear workflows, which struggle with multi-step or multimodal reasoning. These approaches build dynamic knowledge graphs, like Mind Maps, to explicitly model logical dependencies and context. For instance, the Agentic Reasoning [92] transforms reasoning chains into graph structures for entity extraction, relation identification, and community clustering, enabling dynamic path tracking and optimized retrieval, excelling in tasks like doctoral-level GPQA [67]. Collaborative frameworks such as Co-STORM extend this to multi-agent scenarios, representing queries, tool calls, and knowledge integration as traversable graph nodes to support task decomposition and adaptive reasoning.", + "bbox": [ + 517, + 90, + 911, + 271 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Tool Usage and Management. 
Graph-enhanced approaches to tool management overcome limitations of traditional dependency modeling by effectively capturing complex relationships like parameter passing, functional collaboration, and resource management. Graph RAG-Tool Fusion [57] models tools as graph nodes within a dual-layer architecture of core system APIs and domain-specific tools, encoding direct and indirect dependencies as edges. It uses a two-stage retrieval process: vector-based tool retrieval followed by a graph-based depth-first search to assemble dependency-compliant toolsets.", + "bbox": [ + 517, + 273, + 931, + 436 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "9.2 Multi-Model Collaboration", + "text_level": 1, + "bbox": [ + 517, + 450, + 754, + 463 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Multi-model collaboration has emerged as a pivotal strategy for enhancing task complexity handling and domain adaptability in RAG systems [13]. By integrating the strengths of different models, this approach achieves optimized performance. For example, the CR-Planner [52] combines general-purpose generation models (e.g., GPT-4) with domain-specific critic models (e.g., Llama-3-8B). This hybrid system dynamically orchestrates subgoal planning and execution evaluation, utilizing MCTS to generate high-quality training data. Similarly, UAR [14] employs intent-aware and knowledgerequirement classifiers to dynamically trigger retrieval, decoupling lightweight classification tasks from resource-intensive decoding operations of LLMs. Furthermore, Adaptive-RAG [41] deploys small-complexity classifiers to route queries into different levels of processing strategies, balancing response speed for simple queries with deep reasoning for complex ones. 
These strategies form a closed \"generation-evaluation\"loop, leveraging complementary strengths across models to achieve improved accuracy and computational efficiency.", + "bbox": [ + 517, + 468, + 934, + 755 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "9.3 Multi-Modal Collaboration", + "text_level": 1, + "bbox": [ + 517, + 767, + 754, + 780 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "The breakthrough in Chain-of-Thought (CoT) capabilities of language models has catalyzed the transition of multimodal reasoning from perceptual-level integration to cognitive-level reasoning, promoting Multimodal Collaborative Reasoning as a key trend [4] By deeply integrating the logical reasoning capabilities of language models with the spatial-semantic representation of multimodal data, it significantly enhances information synthesis in complex scenarios [2].", + "bbox": [ + 517, + 785, + 911, + 906 + ], + "page_idx": 29 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 59, + 346, + 71 + ], + "page_idx": 29 + }, + { + "type": "header", + "text": "Gao et al.", + "bbox": [ + 857, + 59, + 911, + 70 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "For instance, in the medical domain, multimodal RAG systems such as MedCoT [56] utilize hierarchical expert systems to integrate CT imaging and pathology reports, enabling knowledge graph validation of diagnostic hypotheses and reducing misdiagnosis risks. 
Future research will likely focus on robust cross-modal knowledge alignment, progressive knowledge distillation, and adaptive reasoning frameworks.", + "bbox": [ + 81, + 90, + 480, + 196 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "9.4 Customized Reinforcement Learning", + "text_level": 1, + "bbox": [ + 83, + 222, + 398, + 237 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "The application of reinforcement learning (RL) in RAG systems has become instrumental in improving module coordination and enhancing overall efficiency. Recent studies focus on designing reward mechanisms tailored to the specific needs of RAG systems. Frameworks such as RAG-Gym [96] and DeepRAG [24] model reasoning processes using Markov Decision Processes and introduce fine-grained process supervision mechanisms. Additionally, ReARTeR [49] and SmartRAG [20] incorporate trust-aware reward strategies and end-to-end policy optimization to achieve superior accuracy and robustness. Opportunities remain for further exploring automated reward modeling with LLMs to facilitate fine-grained supervision.", + "bbox": [ + 81, + 239, + 482, + 436 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "10 Conclusion", + "text_level": 1, + "bbox": [ + 84, + 464, + 228, + 478 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "This paper has systematically reviewed the synergistic integration of Retrieval-Augmented Generation (RAG) and reasoning, providing a formal definition of reasoning within the RAG framework as a structured, multi-step, goal-driven process that dynamically combines parametric and retrieved knowledge to address complex problems.", + "bbox": [ + 81, + 483, + 482, + 574 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "We presented a comprehensive taxonomy covering the purposes, collaboration paradigms, and implementation methods underlying RAG+Reasoning systems. 
The synergy enables more precise retrieval informed by logical analysis and enhances reasoning with contextually relevant, up-to-date knowledge beyond parametric limitations.", + "bbox": [ + 81, + 574, + 485, + 664 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "While the enhanced reasoning capabilities allow tackling complex knowledge-intensive tasks such as deep research, expert-level problem solving, and domain-specific decision support, practical challenges remain. These include computational and token costs that grow non-linearly, risks of overthinking leading to inefficiency and error propagation, and the lack of evaluation frameworks that effectively assess intermediate reasoning quality alongside final results.", + "bbox": [ + 81, + 665, + 482, + 785 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "To bridge the gap from theory to real-world application, we proposed practical design guidelines tailored to diverse domains like finance, healthcare, law, and personal assistants, emphasizing adaptability to heterogeneous, dynamic knowledge sources and strict requirements for output reliability and traceability.", + "bbox": [ + 81, + 785, + 482, + 876 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Finally, we identified promising directions for future research, including graph-structured knowledge integration,", + "bbox": [ + 83, + 876, + 482, + 907 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "multimodal and multi-model collaborative reasoning architectures, and advanced reinforcement learning techniques for optimizing retrieval-reasoning workflows.", + "bbox": [ + 511, + 90, + 913, + 136 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "Overall, this work establishes both a theoretical foundation and practical roadmap to drive the development of next-generation RAG+Reasoning systems capable of robust, transparent, and efficient cognition, paving the way for impactful applications across academia and 
industry.", + "bbox": [ + 511, + 136, + 913, + 212 + ], + "page_idx": 30 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 514, + 231, + 617, + 246 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Abdelrahman Abdallah, Bhawna Piryani, Jamshid Mozafari, Mohammed Ali, and Adam Jatowt. 2025. Rankify: A comprehensive python toolkit for retrieval, re-ranking, and retrieval-augmented generation. arXiv preprint arXiv:2502.02464 (2025).", + "[2] Mohammad Mahdi Abootorabi, Amirhosein Zobeiri, Mahdi Dehghani, Mohammadali Mohammadkhani, Bardia Mohammadi, Omid Ghahroodi, Mahdieh Soleymani Baghshah, and Ehsaneddin Asgari. 2025. Ask in Any Modality: A Comprehensive Survey on Multimodal Retrieval-Augmented Generation. arXiv preprint arXiv:2502.08826 (2025).", + "[3] Akari Asai, Zeqiu Wu, Yizhong Wang, Avirup Sil, and Hannaneh Hajishirzi. 2023. Self-rag: Learning to retrieve, generate, and critique through self-reflection. In The Twelfth International Conference on Learning Representations.", + "[4] Jing Bi, Susan Liang, Xiaofei Zhou, Pinxin Liu, Junjia Guo, Yunlong Tang, Luchuan Song, Chao Huang, Guangyu Sun, Jinxi He, et al. 2025. Why Reasoning Matters? A Survey of Advancements in Multimodal Reasoning (v1). arXiv preprint arXiv:2504.03151 (2025).", + "[5] Yuxi Bi, Yunfan Gao, and Haofen Wang. 2025. StePO-Rec: Towards Personalized Outfit Styling Assistant via Knowledge-Guided Multi-Step Reasoning. arXiv preprint arXiv:2504.09915 (2025).", + "[6] Mingyang Chen, Tianpeng Li, Haoze Sun, Yijie Zhou, Chenzheng Zhu, Fan Yang, Zenan Zhou, Weipeng Chen, Haofen Wang, Jeff Z Pan, et al. 2025. Learning to Reason with Search for LLMs via Reinforcement Learning. arXiv preprint arXiv:2503.19470 (2025).", + "[7] Peter Baile Chen, Yi Zhang, Michael Cafarella, and Dan Roth. 2025. Can we Retrieve Everything All at Once? ARM: An Alignment-Oriented LLM-based Retrieval Method. 
arXiv preprint arXiv:2501.18539 (2025).", + "[8] Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiannan Guan, Peng Wang, Mengkang Hu, Yuhang Zhou, Te Gao, and Wangxiang Che. 2025. Towards reasoning era: A survey of long chain-of-thought for reasoning large language models. arXiv preprint arXiv:2503.09567 (2025).", + "[9] Wenhu Chen, Ming Yin, Max Ku, Pan Lu, Yixin Wan, Xueguang Ma, Jianyu Xu, Xinyi Wang, and Tony Xia. 2023. Theoremqa: A theorem-driven question answering dataset. arXiv preprint arXiv:2305.12524 (2023).", + "[10] Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. 2024. Do not think that much for $2 + 3 = ?$ on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187 (2024).", + "[11] Yixiang Chen, Penglei Sun, Xiang Li, and Xiaowen Chu. 2025. MRD-RAG: Enhancing Medical Diagnosis with Multi-Round Retrieval-Augmented Generation. arXiv preprint arXiv:2504.07724 (2025).", + "[12] Yiqun Chen, Lingyong Yan, Weiwei Sun, Xinyu Ma, Yi Zhang, Shuaiqiang Wang, Dawei Yin, Yiming Yang, and Jiaxin Mao. 2025. Improving Retrieval-Augmented Generation through Multi-Agent Reinforcement Learning. arXiv preprint arXiv:2501.15228 (2025).", + "[13] Zhijun Chen, Jingzheng Li, Pengpeng Chen, Zhuoran Li, Kai Sun, Yuankai Luo, Qianren Mao, Dingqi Yang, Hailong Sun, and Philip S Yu. 2025. Harnessing Multiple Large Language Models: A Survey on" + ], + "bbox": [ + 522, + 250, + 913, + 906 + ], + "page_idx": 30 + }, + { + "type": "header", + "text": "Synergizing RAG and Reasoning: A Systematic Review", + "bbox": [ + 84, + 59, + 380, + 71 + ], + "page_idx": 30 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 651, + 59, + 911, + 71 + ], + "page_idx": 30 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "LLM Ensemble. 
arXiv preprint arXiv:2502.18036 (2025).", + "[14] Qinyuan Cheng, Xiaonan Li, Shimin Li, Qin Zhu, Zhangyue Yin, Yunfan Shao, Linyang Li, Tianxiang Sun, Hang Yan, and Xipeng Qiu. 2024. Unified active retrieval for retrieval augmented generation. arXiv preprint arXiv:2406.12534 (2024).", + "[15] Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, et al. 2025. The Danger of Overthinking: Examining the Reasoning-Action Dilemma in Agentic Tasks. arXiv preprint arXiv:2502.08235 (2025).", + "[16] Alan Dao and Thinh Le. 2025. ReZero: Enhancing LLM search ability by trying one-more-time. arXiv:2504.11001 [cs.CL] https://arxiv.org/abs/2504.11001", + "[17] Tim Dettmers, Artidoro Pagnoni, Ari Holtzman, and Luke Zettlemoyer. 2023. Qlora: Efficient finetuning of quantized llms. Advances in neural information processing systems 36 (2023), 10088-10115.", + "[18] Darren Edge, Ha Trinh, Newman Cheng, Joshua Bradley, Alex Chao, Apurva Mody, Steven Truitt, Dasha Metropolitansky, Robert Oazuwa Ness, and Jonathan Larson. 2024. From local to global: A graph rag approach to query-focused summarization. arXiv preprint arXiv:2404.16130 (2024).", + "[19] Chenrui Fan, Ming Li, Lichao Sun, and Tianyi Zhou. 2025. Missing Premise exacerbates Overthinking: Are Reasoning Models losing Critical Thinking Skill? arXiv preprint arXiv:2504.06514 (2025).", + "[20] Jingsheng Gao, Linxu Li, Weiyuan Li, Yuzhuo Fu, and Bin Dai. 2024. SmartRAG: Jointly Learn RAG-Related Tasks From the Environment Feedback. arXiv preprint arXiv:2410.18141 (2024).", + "[21] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, and Haofen Wang. 2023. Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997 (2023).", + "[22] Yunfan Gao, Yun Xiong, Meng Wang, and Haofen Wang. 2024. Modular rag: Transforming rag systems into lego-like reconfigurable frameworks. 
arXiv preprint arXiv:2407.21059 (2024).", + "[23] Zengyi Gao, Yukun Cao, Hairu Wang, Ao Ke, Yuan Feng, Xike Xie, and S Kevin Zhou. 2025. FRAG: A Flexible Modular Framework for Retrieval-Augmented Generation based on Knowledge Graphs. arXiv preprint arXiv:2501.09957 (2025).", + "[24] Xinyan Guan, Jiali Zeng, Fandong Meng, Chunlei Xin, Yaojie Lu, Hongyu Lin, Xianpei Han, Le Sun, and Jie Zhou. 2025. DeepRAG: Thinking to Retrieve Step by Step for Large Language Models. arXiv preprint arXiv:2502.01142 (2025).", + "[25] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025).", + "[26] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025).", + "[27] Zirui Guo, Lianghao Xia, Yanhua Yu, Tu Ao, and Chao Huang. 2024. Lighthrag: Simple and fast retrieval-augmented generation. (2024).", + "[28] Bernal Jiménez Gutiérrez, Yiheng Shu, Weijian Qi, Sizhe Zhou, and Yu Su. 2025. From RAG to Memory: Non-Parametric Continual Learning for Large Language Models. arXiv preprint arXiv:2502.14802 (2025).", + "[29] Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, et al. 2024. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems. arXiv preprint arXiv:2402.14008 (2024).", + "[30] Yancheng He, Shilong Li, Jiaheng Liu, Weixun Wang, Xingyuan Bu, Ge Zhang, Zhongyuan Peng, Zhaoxiang Zhang, Zhicheng Zheng, Wenbo Su, et al. 2025. Can Large Language Models Detect Errors in Long Chain-of-Thought Reasoning? 
arXiv preprint arXiv:2502.19361" + ], + "bbox": [ + 91, + 93, + 483, + 898 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "(2025).", + "[31] Xanh Ho, Anh-Khoa Duong Nguyen, Saku Sugawara, and Akiko Aizawa. 2020. Constructing a multi-hop qa dataset for comprehensive evaluation of reasoning steps. arXiv preprint arXiv:2011.01060 (2020).", + "[32] Yubin Hong, Chaofan Li, Jingyi Zhang, and Yingxia Shao. 2025. FG-RAG: Enhancing Query-Focused Summarization with Context-Aware Fine-Grained Graph RAG. arXiv preprint arXiv:2504.07103 (2025).", + "[33] SU Hongjin, Howard Yen, Mengzhou Xia, Weijia Shi, Niklas Muennighoff, Han-yu Wang, Liu Haisu, Quan Shi, Zachary S Siegel, Michael Tang, et al. 2024. BRIGHT: A Realistic and Challenging Benchmark for Reasoning-Intensive Retrieval. In The Thirteenth International Conference on Learning Representations.", + "[34] Sheryl Hsu, Omar Khattab, Chelsea Finn, and Archit Sharma. 2024. Grounding by trying: Llms with reinforcement learning-enhanced retrieval. arXiv preprint arXiv:2410.23214 (2024).", + "[35] Jian Hu. 2025. REINFORCE++: A Simple and Efficient Approach for Aligning Large Language Models. arXiv preprint arXiv:2501.03262 (2025).", + "[36] Yunhai Hu, Yilun Zhao, Chen Zhao, and Arman Cohan. 2025. MCTS-RAG: Enhancing Retrieval-Augmented Generation with Monte Carlo Tree Search. arXiv preprint arXiv:2503.20757 (2025).", + "[37] Fantine Huot, Reinald Kim Amplayo, Jennimaria Palomaki, Alice Shoshana Jakobovits, Elizabeth Clark, and Mirella Lapata. 2024. Agents' Room: Narrative Generation through Multi-step Collaboration. arXiv preprint arXiv:2410.02603 (2024).", + "[38] Shayekh Bin Islam, Md Asib Rahman, KSM Hossain, Enamul Hoque, Shafiq Joty, and Md Rizwan Parvez. 2024. Open-rag: Enhanced retrieval-augmented reasoning with open-source large language models. 
arXiv preprint arXiv:2410.01782 (2024).", + "[39] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. 2024. Openai o1 system card. arXiv preprint arXiv:2412.16720 (2024).", + "[40] Naman Jain, King Han, Alex Gu, Wen-Ding Li, Fanjia Yan, Tianjun Zhang, Sida Wang, Armando Solar-Lezama, Koushik Sen, and Ion Stoica. 2024. Livecodebench: Holistic and contamination free evaluation of large language models for code. arXiv preprint arXiv:2403.07974 (2024).", + "[41] Soyeong Jeong, Jinheon Baek, Sukmin Cho, Sung Ju Hwang, and Jong C Park. 2024. Adaptive-rag: Learning to adapt retrieval-augmented large language models through question complexity arXiv preprint arXiv:2403.14403 (2024).", + "[42] Pengcheng Jiang. 2025. DeepRetrieval: Powerful Query Generation for Information Retrieval with Reinforcement Learning. arXiv preprint arXiv:2503.00223 (2025).", + "[43] Yucheng Jiang, Yijia Shao, Dekun Ma, Sina J Semnani, and Monica S Lam. 2024. Into the unknown unknowns: Engaged human learning through participation in language model agent conversations. arXiv preprint arXiv:2408.15232 (2024).", + "[44] Yucheng Jiang, Yijia Shao, Dekun Ma, Sina J Semnani, and Monica S Lam. 2024. Into the unknown unknowns: Engaged human learning through participation in language model agent conversations. arXiv preprint arXiv:2408.15232 (2024).", + "[45] Zhengbao Jiang, Frank F Xu, Luyu Gao, Zhiqing Sun, Qian Liu, Jane Dwivedi-Yu, Yiming Yang, Jamie Callan, and Graham Neubig. 2023 Active retrieval augmented generation. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing 7969-7992.", + "[46] Ashutosh Joshi, Sheikh Muhammad Sarwar, Samarth Varshney, Sreyashi Nag, Shrivats Agrawal, and Juhi Naik. 2024. REAPER: Reasoning based retrieval planning for complex RAG systems. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 4621-4628." 
+ ], + "bbox": [ + 522, + 94, + 913, + 885 + ], + "page_idx": 31 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 59, + 346, + 71 + ], + "page_idx": 31 + }, + { + "type": "header", + "text": "Gao et al.", + "bbox": [ + 857, + 59, + 911, + 70 + ], + "page_idx": 31 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[47] Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, et al. 2019. Natural questions: a benchmark for question answering research. Transactions of the Association for Computational Linguistics 7 (2019), 453-466.", + "[48] Myeonghwa Lee, Seonho An, and Min-Soo Kim. 2024. PlanRAG: A plan-then-retrieval augmented generation for generative large language models as decision makers. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers). 6537–6555.", + "[49] Zhicheng Lee, Shulin Cao, Jinxin Liu, Jiajie Zhang, Weichuan Liu, Xiaoyin Che, Lei Hou, and Juanzi Li. 2025. ReaRAG: Knowledge-guided Reasoning Enhances Factuality of Large Reasoning Models with Iterative Retrieval Augmented Generation. arXiv preprint arXiv:2503.21729 (2025).", + "[50] Jinzheng Li, Jingshu Zhang, Hongguang Li, and Yiqing Shen. 2024. An Agent Framework for Real-Time Financial Information Searching with Large Language Models. arXiv preprint arXiv:2502.15684 (2024).", + "[51] Xiaoxi Li, Guanting Dong, Jiajie Jin, Yuyao Zhang, Yujia Zhou, Yutao Zhu, Peitian Zhang, and Zhicheng Dou. 2025. Search-01: Agentic search-enhanced large reasoning models. arXiv preprint arXiv:2501.05366 (2025).", + "[52] Xingxuan Li, Weiwen Xu, Ruochen Zhao, Fangkai Jiao, Shafiq Joty, and Lidong Bing. 2024. Can We Further Elicit Reasoning in LLMs? 
Critic-Guided Planning with Retrieval-Augmentation for Solving Challenging Tasks. arXiv preprint arXiv:2410.01428 (2024).", + "[53] Xingxuan Li, Weiwen Xu, Ruochen Zhao, Fangkai Jiao, Shafiq Joty, and Lidong Bing. 2024. Can We Further Elicit Reasoning in LLMs? Critic-Guided Planning with Retrieval-Augmentation for Solving Challenging Tasks. arXiv preprint arXiv:2410.01428 (2024).", + "[54] Zhuoqun Li, Haiyang Yu, Xuanang Chen, Hongyu Lin, Yaojie Lu, Fei Huang, Xianpei Han, Yongbin Li, and Le Sun. 2025. Deepsolution: Boosting complex engineering solution design via tree-based exploration and bi-point thinking. arXiv preprint arXiv:2502.20730 (2025).", + "[55] Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. 2023. Let's verify step by step. In *The Twelfth International Conference on Learning Representations*.", + "[56] Jiaxiang Liu, Yuan Wang, Jiawei Du, Joey Tianyi Zhou, and Zuozhu Liu. 2024. Medcot: Medical chain of thought via hierarchical expert. arXiv preprint arXiv:2412.13736 (2024).", + "[57] Elias Lumer, Pradeep Honaganahalli Basavaraju, Myles Mason, James A Burke, and Vamse Kumar Subbiah. 2025. Graph RAG-Tool Fusion. arXiv preprint arXiv:2502.07223 (2025).", + "[58] Haoran Luo, Yikai Guo, Qika Lin, Xiaobao Wu, Xinyu Mu, Wenhao Liu, Meina Song, Yifan Zhu, Luu Anh Tuan, et al. 2025. KBQA-o1: Agentic Knowledge Base Question Answering with Monte Carlo Tree Search. arXiv preprint arXiv:2501.18922 (2025).", + "[59] Yuanjie Lyu, Zhiyu Li, Simin Niu, Feiyu Xiong, Bo Tang, Wenjin Wang, Hao Wu, Huanyong Liu, Tong Xu, and Enhong Chen. 2025. Crud-rag: A comprehensive chinese benchmark for retrieval-augmented generation of large language models. ACM Transactions on Information Systems 43, 2 (2025), 1-32.", + "[60] Shengjie Ma, Chengjin Xu, Xuhui Jiang, Muzhi Li, Huaren Qu, Cehao Yang, Jiaxin Mao, and Jian Guo. 2024. 
Think-on-Graph 2.0: Deep and Faithful Large Language Model Reasoning with Knowledge-guided Retrieval Augmented Generation. arXiv preprint arXiv:2407.10805 (2024).", + "[61] Xinbei Ma, Yeyun Gong, Pengcheng He, Hai Zhao, and Nan Duan. 2023. Query rewriting in retrieval-augmented large language models. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. 5303-5315." + ], + "bbox": [ + 89, + 92, + 483, + 898 + ], + "page_idx": 32 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[62] Grégoire Mialon, Clémentine Fourrier, Thomas Wolf, Yann LeCun, and Thomas Scialom. 2023. Gaia: a benchmark for general ai assistants. In The Twelfth International Conference on Learning Representations.", + "[63] Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. 2025. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393 (2025).", + "[64] Shishir G Patil, Tianjun Zhang, Xin Wang, and Joseph E Gonzalez. 2024. Gorilla: Large language model connected with massive apis. Advances in Neural Information Processing Systems 37 (2024), 126544-126565.", + "[65] Fabio Petroni, Aleksandra Piktus, Angela Fan, Patrick Lewis, Majid Yazdani, Nicola De Cao, James Thorne, Yacine Jernite, Vladimir Karpukhin, Jean Maillard, et al. 2020. KILT: a benchmark for knowledge intensive language tasks. arXiv preprint arXiv:2009.02252 (2020).", + "[66] Pouya Pezeshkpour and Estevam Hruschka. 2025. Insight-RAG: Enhancing LLMs with Insight-Driven Augmentation. arXiv preprint arXiv:2504.00187 (2025).", + "[67] David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R Bowman. 2024. Gpqa: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling.", + "[68] Zhihong Shao, Yeyun Gong, Yelong Shen, Minlie Huang, Nan Duan, and Weizhu Chen. 2023. 
Enhancing retrieval-augmented large language models with iterative retrieval-generation synergy. arXiv preprint arXiv:2305.15294 (2023).", + "[69] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300 (2024).", + "[70] Quan Shi, Michael Tang, Karthik Narasimhan, and Shunyu Yao. 2024. Can Language Models Solve Olympiad Programming? arXiv preprint arXiv:2404.10952 (2024).", + "[71] Quan Shi, Michael Tang, Karthik Narasimhan, and Shunyu Yao. 2024. Can Language Models Solve Olympiad Programming? arXiv preprint arXiv:2404.10952 (2024).", + "[72] Huatong Song, Jinhao Jiang, Yingqian Min, Jie Chen, Zhipeng Chen, Wayne Xin Zhao, Lei Fang, and Ji-Rong Wen. 2025. R1-Searcher: Incentivizing the Search Capability in LLMs via Reinforcement Learning. arXiv preprint arXiv:2503.05592 (2025).", + "[73] Sakhinana Sagar Srinivas and Venkataramana Runkana. 2025. Scaling Test-Time Inference with Policy-Optimized, Dynamic Retrieval-Augmented Generation via KV Caching and Decoding. arXiv preprint arXiv:2504.01281 (2025).", + "[74] Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Hanjie Chen, Xia Hu, et al. 2025. Stop overthinking: A survey on efficient reasoning for large language models. arXiv preprint arXiv:2503.16419 (2025).", + "[75] Zhongxiang Sun, Qipeng Wang, Weijie Yu, Xiaoxue Zang, Kai Zheng, Jun Xu, Xiao Zhang, Song Yang, and Han Li. 2025. ReARTeR: Retrieval-Augmented Reasoning with Trustworthy Process Rewarding. arXiv preprint arXiv:2501.07861 (2025).", + "[76] Alon Talmor and Jonathan Berant. 2018. The web as a knowledge-base for answering complex questions. arXiv preprint arXiv:1803.06643 (2018).", + "[77] Hieu Tran, Zonghai Yao, Junda Wang, Yifan Zhang, Zhichao Yang, and Hong Yu. 2024. 
RARE: Retrieval-Augmented Reasoning Enhancement for Large Language Models. arXiv preprint arXiv:2412.02830 (2024).", + "[78] Harsh Trivedi, Niranjan Balasubramanian, Tushar Khot, and Ashish Sabharwal. 2022. Interleaving retrieval with chain-of-thought reasoning for knowledge-intensive multi-step questions. arXiv preprint arXiv:2212.10509 (2022)." + ], + "bbox": [ + 522, + 92, + 913, + 883 + ], + "page_idx": 32 + }, + { + "type": "header", + "text": "Synergizing RAG and Reasoning: A Systematic Review", + "bbox": [ + 84, + 59, + 380, + 71 + ], + "page_idx": 32 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 651, + 59, + 911, + 71 + ], + "page_idx": 32 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[79] Harsh Trivedi, Niranjan Balasubramanian, Tushar Khot, and Ashish Sabharwal. 2022. MuSiQue: Multihop Questions via Single-hop Question Composition. Transactions of the Association for Computational Linguistics 10 (2022), 539-554.", + "[80] Tu Vu, Mohit Iyyer, Xuezhi Wang, Noah Constant, Jerry Wei, Jason Wei, Chris Tar, Yun-Hsuan Sung, Denny Zhou, Quoc Le, et al. 2023. Freshllms: Refreshing large language models with search engine augmentation. arXiv preprint arXiv:2310.03214 (2023).", + "[81] Ante Wang, Linfeng Song, Ye Tian, Dian Yu, Haitao Mi, Xiangyu Duan, Zhaopeng Tu, Jinsong Su, and Dong Yu. 2025. Don't Get Lost in the Trees: Streamlining LLM Reasoning by Overcoming Tree Search Exploration Pitfalls. arXiv preprint arXiv:2502.11183 (2025).", + "[82] Jinyu Wang, Jingjing Fu, Rui Wang, Lei Song, and Jiang Bian. 2025. PIKE-RAG: sPecialized Knowledge and Rationale Augmented Generation. arXiv preprint arXiv:2501.11551 (2025).", + "[83] Liang Wang, Haonan Chen, Nan Yang, Xiaolong Huang, Zhicheng Dou, and Furu Wei. 2025. Chain-of-Retrieval Augmented Generation. 
arXiv preprint arXiv:2501.14342 (2025).", + "[84] Ruobing Wang, Daren Zha, Shi Yu, Qingfei Zhao, Yuxuan Chen, Yixuan Wang, Shuo Wang, Yukun Yan, Zhenghao Liu, Xu Han, et al. 2024. Retriever-and-Memory: Towards Adaptive Note-Enhanced Retrieval-Augmented Generation. arXiv preprint arXiv:2410.08821 (2024).", + "[85] Siqi Wang, Chao Liang, Yunfan Gao, Yang Liu, Jing Li, and Haofen Wang. 2024. Decoding Urban Industrial Complexity: Enhancing Knowledge-Driven Insights via IndustryScopeGPT. In Proceedings of the 32nd ACM International Conference on Multimedia. 4757-4765.", + "[86] Shuting Wang, Jiongnan Liu, Shiren Song, Jiehan Cheng, Yuqi Fu, Peidong Guo, Kun Fang, Yutao Zhu, and Zhicheng Dou. 2024. Domainrag: A chinese benchmark for evaluating domain-specific retrieval-augmented generation. arXiv preprint arXiv:2406.05654 (2024).", + "[87] Xidong Wang, Guiming Hardy Chen, Dingjie Song, Zhiyi Zhang, Zhihong Chen, Qingying Xiao, Feng Jiang, Jianquan Li, Xiang Wan, Benyou Wang, et al. 2023. Cmb: A comprehensive medical benchmark in chinese. arXiv preprint arXiv:2308.08833 (2023).", + "[88] Xiaohua Wang, Zhenghua Wang, Xuan Gao, Feiran Zhang, Yixin Wu, Zhibo Xu, Tianyuan Shi, Zhengyuan Wang, Shizheng Li, Qi Qian, et al. 2024. Searching for best practices in retrieval-augmented generation. arXiv preprint arXiv:2407.01219 (2024).", + "[89] Zheng Wang, Zhongyang Li, Zeren Jiang, Dandan Tu, and Wei Shi. 2024. Crafting Personalized Agents through Retrieval-Augmented Generation on Editable Memory Graphs. arXiv preprint arXiv:2409.19401 (2024).", + "[90] Zhengren Wang, Jiayang Yu, Dongsheng Ma, Zhe Chen, Yu Wang, Zhiyu Li, Feiyu Xiong, Yanfeng Wang, Linpeng Tang, Wentao Zhang, et al. 2025. RARE: Retrieval-Augmented Reasoning Modeling. arXiv preprint arXiv:2503.23513 (2025).", + "[91] Yixuan Weng, Minjun Zhu, Guangsheng Bao, Hongbo Zhang, Jindong Wang, Yue Zhang, and Linyi Yang. 2024. Cyclereresearcher: Improving automated research via automated review. 
arXiv preprint arXiv:2411.00816 (2024).", + "[92] Junde Wu, Jiayuan Zhu, and Yuyuan Liu. 2025. Agentic Reasoning: Reasoning LLMs with Tools for the Deep Research. arXiv preprint arXiv:2502.04644 (2025).", + "[93] Wenjie Wu, Yongcheng Jing, Yingjie Wang, Wenbin Hu, and Dacheng Tao. 2025. Graph-augmented reasoning: Evolving step-by-step knowledge graph retrieval for llm reasoning. arXiv preprint arXiv:2503.01642 (2025).", + "[94] Zekun Xi, Wenbiao Yin, Jizhan Fang, Jialong Wu, Runnan Fang, Ningyu Zhang, Jiang Yong, Pengjun Xie, Fei Huang, and Huajun Chen. 2025. OmniThink: Expanding Knowledge Boundaries in Machine Writing through Thinking. arXiv preprint arXiv:2501.09751 (2025)." + ], + "bbox": [ + 89, + 92, + 483, + 883 + ], + "page_idx": 33 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[95] Liang Xiao, Wen Dai, Shuai Chen, Bin Qin, Chongyang Shi, Haopeng Jing, and Tianyu Guo. 2025. Retrieval-Augmented Generation by Evidence Retroactivity in LLMs. arXiv preprint arXiv:2501.05475 (2025).", + "[96] Guangzhi Xiong, Qiao Jin, Xiao Wang, Yin Fang, Haolin Liu, Yifan Yang, Fangyuan Chen, Zhixing Song, Dengyu Wang, Minjia Zhang, et al. 2025. Rag-gym: Optimizing reasoning and search agents with process supervision. arXiv preprint arXiv:2502.13957 (2025).", + "[97] Guanming Xiong, Haochen Li, and Wen Zhao. 2025. MCTS-KBQA: Monte Carlo Tree Search for Knowledge Base Question Answering. arXiv preprint arXiv:2502.13428 (2025).", + "[98] Ruibin Xiong, Yimeng Chen, Dmitrii Khizbullin, and Jürgen Schmidhuber. 2025. Beyond Outlining: Heterogeneous Recursive Planning for Adaptive Long-form Writing with Language Models. arXiv preprint arXiv:2503.08275 (2025).", + "[99] Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, et al. 2025. Towards Large Reasoning Models: A Survey of Reinforced Reasoning with Large Language Models. 
arXiv preprint arXiv:2501.09686 (2025).", + "[100] Zhipeng Xu, Zhenghao Liu, Yukun Yan, Shuo Wang, Shi Yu, Zheni Zeng, Chaojun Xiao, Zhiyuan Liu, Ge Yu, and Chenyan Xiong. 2024. ActiveRAG: Autonomous Knowledge Assimilation and Accommodation through Retrieval-Augmented Agents. arXiv preprint arXiv:2402.13547 (2024).", + "[101] Ruiran Yan, Zheng Liu, and Defu Lian. 2025. O1 embedder: Let retrievers think before action. arXiv preprint arXiv:2502.07555 (2025).", + "[102] Xiaoming Zhang, Ming Wang, Xiaocui Yang, Daling Wang, Shi Feng, and Yifei Zhang. 2024. Hierarchical Retrieval-Augmented Generation Model with Rethink for Multi-hop Question Answering. arXiv preprint arXiv:2408.11875 (2024).", + "[103] Zhuocheng Zhang, Yang Feng, and Min Zhang. 2025. LevelRAG: Enhancing Retrieval-Augmented Generation with Multi-hop Logic Planning over Rewriting Augmented Searchers. arXiv preprint arXiv:2502.18139 (2025).", + "[104] Bowen Zhao, Zander Brumbaugh, Yizhong Wang, Hannaneh Hajishirzi, and Noah A Smith. 2024. Set the clock: Temporal alignment of pretrained language models. arXiv preprint arXiv:2402.16797 (2024).", + "[105] Xuejiao Zhao, Siyan Liu, Su-Yin Yang, and Chunyan Miao. 2025. MedRAG: Enhancing Retrieval-augmented Generation with Knowledge Graph-Elicited Reasoning for Healthcare Copilot. arXiv preprint arXiv:2502.04413 (2025).", + "[106] Yuxiang Zheng, Dayuan Fu, Xiangkun Hu, Xiaojie Cai, Lyumanshan Ye, Pengrui Lu, and Pengfei Liu. 2025. DeepResearcher: Scaling Deep Research via Reinforcement Learning in Real-world Environments. arXiv preprint arXiv:2504.03160 (2025).", + "[107] Yijie Zhong, Feifan Wu, Mengying Guo, Xiaolian Zhang, Meng Wang, and Haofen Wang. 2025. Meta-PKE: Memory-Enhanced Task-Adaptive Personal Knowledge Extraction in Daily Life. Information Processing & Management 62, 4 (2025), 104097.", + "[108] Yujia Zhou, Zheng Liu, Jiajie Jin, Jian-Yun Nie, and Zhicheng Dou. 2024. Metacognitive retrieval-augmented large language models. 
In Proceedings of the ACM Web Conference 2024. 1453-1463.", + "[109] Jiachen Zhu, Congmin Zheng, Jianghao Lin, Kounianhua Du, Ying Wen, Yong Yu, Jun Wang, and Weinan Zhang. 2025. Retrieval-Augmented Process Reward Model for Generalizable Mathematical Reasoning. arXiv preprint arXiv:2502.14361 (2025).", + "[110] Rongzhi Zhu, Xiangyu Liu, Zequn Sun, Yiwei Wang, and Wei Hu. 2025. Mitigating Lost-in-Retrieval Problems in Retrieval Augmented Multi-Hop Question Answering. arXiv preprint arXiv:2502.14245 (2025)." + ], + "bbox": [ + 517, + 92, + 911, + 859 + ], + "page_idx": 33 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 59, + 346, + 71 + ], + "page_idx": 33 + }, + { + "type": "header", + "text": "Gao et al.", + "bbox": [ + 857, + 59, + 911, + 71 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Appendix", + "text_level": 1, + "bbox": [ + 83, + 90, + 176, + 107 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Agentic RAG Symbol Reference System", + "text_level": 1, + "bbox": [ + 83, + 109, + 379, + 125 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "The following table presents a complete symbol reference system with formally defined mathematical notations for all core concepts.", + "bbox": [ + 81, + 128, + 480, + 175 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Symbol Design Hierarchy", + "text_level": 1, + "bbox": [ + 83, + 185, + 277, + 200 + ], + "page_idx": 34 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Base states/actions: Standard font $(S_{t},a_{t})$", + "- Sets/spaces: Calligraphic font $(\\mathcal{A},\\mathcal{K}_t)$", + "- Core mechanism functions: Uppercase Greek $(\\Psi, \\Gamma)$", + "- Operational functions: Calligraphic font $(\\mathcal{R},\\mathcal{T}_a)$" + ], + "bbox": [ + 109, + 204, + 480, + 265 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- Auxiliary functions: Lowercase Greek $(\\delta, \\phi)$ or 
blackboard bold (I)", + "bbox": [ + 540, + 90, + 915, + 121 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Annotation Guidelines", + "text_level": 1, + "bbox": [ + 514, + 133, + 689, + 146 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- Symbol disambiguation:", + "bbox": [ + 540, + 152, + 736, + 166 + ], + "page_idx": 34 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- $\\mathcal{R}$ strictly denotes retrieval function (vs. reward $R$ )", + "- $\\delta$ exclusively represents state transitions (vs. branch selector $\\psi$ )" + ], + "bbox": [ + 553, + 167, + 911, + 212 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- Dynamic extensions:", + "bbox": [ + 540, + 213, + 712, + 227 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- Action space $\\mathcal{A}$ and knowledge base $\\mathcal{K}_t$ support incremental updates: $\\mathcal{K}_{t + 1} = \\mathcal{K}_t\\oplus \\mathrm{Retrieve}(q_t)$", + "bbox": [ + 553, + 227, + 911, + 258 + ], + "page_idx": 34 + }, + { + "type": "header", + "text": "Synergizing RAG and Reasoning: A Systematic Review", + "bbox": [ + 84, + 59, + 380, + 71 + ], + "page_idx": 34 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 651, + 59, + 913, + 71 + ], + "page_idx": 34 + }, + { + "type": "table", + "img_path": "images/ea63eb4b161da6fe9a953b4d2d131a0946aee8e4aa2d5f88da7c9c7c4c90820f.jpg", + "table_caption": [ + "Table 3. Basic states and system components" + ], + "table_footnote": [], + "table_body": "
SymbolTypeDefinition & Description
St=(Ht,Ct)Composite stateComplete system state at timestep t, containing historical information and context vectors
HtVector/SetHistorical information aggregation
CtVectorContextual embedding vectors
qtVectorVector representation of current query at step t
KtSetDynamic knowledge base ( Initialized as K0=∅)
", + "bbox": [ + 165, + 117, + 831, + 205 + ], + "page_idx": 35 + }, + { + "type": "table", + "img_path": "images/3b93db755ec01b5c451d164e3cc6cc14ab28488e52aa26dae925d402e660ea21.jpg", + "table_caption": [ + "Table 4. Action space and policy definitions" + ], + "table_footnote": [], + "table_body": "
SymbolTypeDefinition & Description
ASetAction space, e.g., A = {Retrieve, Generate, Verify, Terminate}
atScalarSelected action at timestep t (at ∈ A)
π(St; Θ)FunctionPolicy function with parameters Θ, mapping states to action probability distributions (π: S → Δ(A))
", + "bbox": [ + 166, + 243, + 831, + 305 + ], + "page_idx": 35 + }, + { + "type": "table", + "img_path": "images/1d848e6509a2fa6c7cfdf8b5bb7b9054f778a70c266ccc426354134081ddf86d.jpg", + "table_caption": [ + "Table 5. State transition mechanisms" + ], + "table_footnote": [], + "table_body": "
SymbolTypeDefinition & Description
δFunctionState transition function, update rule St+1 = δ(St, ·)
TaFunctionLow-level state transition operation for action a (e.g., TRetrieve denotes retrieval)
RFunctionRetrieval function, R(St) returns retrieval results
OperatorFunction composition operator (e.g., f∘g(x) = f(g(x)))
", + "bbox": [ + 163, + 342, + 828, + 431 + ], + "page_idx": 35 + }, + { + "type": "table", + "img_path": "images/4d1bcbeaa0e278e95266fc849a9e6ebfbf2b494143d1d43c264d056f0d8e6ecf.jpg", + "table_caption": [ + "Table 6. Feedback and optimization components" + ], + "table_footnote": [], + "table_body": "
SymbolTypeDefinition & Description
R(St, at, St+1)FunctionReward function, outputs reward value rt
I(·)FunctionIndicator function (returns 1 if condition holds, else 0)
∇θJ(θ)OperatorPolicy gradient for optimizing policy parameters Θ
γScalarDiscount factor for cumulative reward calculation
", + "bbox": [ + 205, + 469, + 790, + 568 + ], + "page_idx": 35 + }, + { + "type": "table", + "img_path": "images/d1f3e9f6c15da1afab15a3d990fdc1295647b11a1816d189b65c2a07934958e2.jpg", + "table_caption": [ + "Table 7. Submodule-specific symbols" + ], + "table_footnote": [], + "table_body": "
SymbolTypeDefinition & Description
ΨFunctionReasoning function, generates intermediate reasoning results
ΓFunctionDecision function, produces final outputs (e.g., answers)
ψ(·)FunctionBranch selector for reflective reasoning path selection
φ(·)FunctionConfidence mapping function (evaluations to scalar confidence)
τScalarDecision threshold for triggering specific operations (e.g., verification/termination)
", + "bbox": [ + 166, + 606, + 831, + 709 + ], + "page_idx": 35 + }, + { + "type": "header", + "text": "Conference'17, July 2017, Washington, DC, USA", + "bbox": [ + 84, + 59, + 346, + 71 + ], + "page_idx": 35 + }, + { + "type": "header", + "text": "Gao et al.", + "bbox": [ + 857, + 59, + 911, + 71 + ], + "page_idx": 35 + } +] \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15909/41ce0363-307a-4461-bbaf-6fdf5036b2e7_model.json b/data/2025/2504_15xxx/2504.15909/41ce0363-307a-4461-bbaf-6fdf5036b2e7_model.json new file mode 100644 index 0000000000000000000000000000000000000000..a655b871a609066ed21590325d6d521d43e55618 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/41ce0363-307a-4461-bbaf-6fdf5036b2e7_model.json @@ -0,0 +1,7312 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.273, + 0.061, + 0.708 + ], + "angle": 270, + "content": "arXiv:2504.15909v2 [cs.IR] 24 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.088, + 0.915, + 0.119 + ], + "angle": 0, + "content": "Synergizing RAG and Reasoning: A Systematic Review" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.129, + 0.28, + 0.144 + ], + "angle": 0, + "content": "Yunfan Gao" + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.146, + 0.34, + 0.16 + ], + "angle": 0, + "content": "Shanghai Research Institute for" + }, + { + "type": "text", + "bbox": [ + 0.12, + 0.162, + 0.345, + 0.176 + ], + "angle": 0, + "content": "Intelligent Autonomous Systems," + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.177, + 0.291, + 0.191 + ], + "angle": 0, + "content": "Tongji University" + }, + { + "type": "text", + "bbox": [ + 0.21, + 0.192, + 0.252, + 0.204 + ], + "angle": 0, + "content": "China" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.207, + 0.325, + 0.222 + ], + "angle": 0, + "content": "gaoyunfan1602@gmail.com" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.233, + 0.262, + 0.248 + ], + "angle": 0, + "content": "Yuxi Bi" + }, + { + "type": "text", + "bbox": [ + 
0.117, + 0.25, + 0.347, + 0.264 + ], + "angle": 0, + "content": "College of Design and Innovation," + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.265, + 0.29, + 0.28 + ], + "angle": 0, + "content": "Tongji University" + }, + { + "type": "text", + "bbox": [ + 0.21, + 0.281, + 0.252, + 0.293 + ], + "angle": 0, + "content": "China" + }, + { + "type": "text", + "bbox": [ + 0.167, + 0.296, + 0.295, + 0.31 + ], + "angle": 0, + "content": "yuxibi@gmail.com" + }, + { + "type": "text", + "bbox": [ + 0.455, + 0.129, + 0.543, + 0.146 + ], + "angle": 0, + "content": "Yun Xiong" + }, + { + "type": "text", + "bbox": [ + 0.388, + 0.147, + 0.612, + 0.161 + ], + "angle": 0, + "content": "Shanghai Key Laboratory of Data" + }, + { + "type": "text", + "bbox": [ + 0.375, + 0.162, + 0.625, + 0.176 + ], + "angle": 0, + "content": "Science, School of Computer Science," + }, + { + "type": "text", + "bbox": [ + 0.441, + 0.177, + 0.558, + 0.19 + ], + "angle": 0, + "content": "Fudan University" + }, + { + "type": "text", + "bbox": [ + 0.477, + 0.192, + 0.52, + 0.204 + ], + "angle": 0, + "content": "China" + }, + { + "type": "text", + "bbox": [ + 0.431, + 0.207, + 0.567, + 0.221 + ], + "angle": 0, + "content": "yunx@fudan.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.458, + 0.233, + 0.54, + 0.249 + ], + "angle": 0, + "content": "Ming Xue" + }, + { + "type": "text", + "bbox": [ + 0.461, + 0.25, + 0.537, + 0.263 + ], + "angle": 0, + "content": "Percena AI" + }, + { + "type": "text", + "bbox": [ + 0.478, + 0.266, + 0.52, + 0.278 + ], + "angle": 0, + "content": "China" + }, + { + "type": "text", + "bbox": [ + 0.437, + 0.281, + 0.562, + 0.294 + ], + "angle": 0, + "content": "mxue@percena.co" + }, + { + "type": "text", + "bbox": [ + 0.719, + 0.129, + 0.815, + 0.146 + ], + "angle": 0, + "content": "Yijie Zhong" + }, + { + "type": "text", + "bbox": [ + 0.654, + 0.147, + 0.883, + 0.161 + ], + "angle": 0, + "content": "College of Design and Innovation," + }, + { + "type": "text", + "bbox": [ 
+ 0.708, + 0.162, + 0.827, + 0.176 + ], + "angle": 0, + "content": "Tongji University" + }, + { + "type": "text", + "bbox": [ + 0.746, + 0.177, + 0.789, + 0.188 + ], + "angle": 0, + "content": "China" + }, + { + "type": "text", + "bbox": [ + 0.692, + 0.191, + 0.843, + 0.206 + ], + "angle": 0, + "content": "dun.haski@gmail.com" + }, + { + "type": "text", + "bbox": [ + 0.708, + 0.233, + 0.826, + 0.25 + ], + "angle": 0, + "content": "Haofen Wang*" + }, + { + "type": "text", + "bbox": [ + 0.654, + 0.25, + 0.882, + 0.264 + ], + "angle": 0, + "content": "College of Design and Innovation," + }, + { + "type": "text", + "bbox": [ + 0.708, + 0.265, + 0.826, + 0.28 + ], + "angle": 0, + "content": "Tongji University" + }, + { + "type": "text", + "bbox": [ + 0.746, + 0.281, + 0.788, + 0.293 + ], + "angle": 0, + "content": "China" + }, + { + "type": "text", + "bbox": [ + 0.671, + 0.296, + 0.864, + 0.31 + ], + "angle": 0, + "content": "carter.whfcarter@gmail.com" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.32, + 0.164, + 0.334 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.339, + 0.485, + 0.673 + ], + "angle": 0, + "content": "Recent breakthroughs in large language models (LLMs), particularly in reasoning capabilities, have propelled Retrieval-Augmented Generation (RAG) to unprecedented levels. By synergizing retrieval mechanisms with advanced reasoning, LLMs can now tackle increasingly complex problems. This paper presents a systematic review of the collaborative interplay between RAG and reasoning, clearly defining \"reasoning\" within the RAG context. It construct a comprehensive taxonomy encompassing multi-dimensional collaborative objectives, representative paradigms, and technical implementations, and analyze the bidirectional synergy methods. 
Additionally, we critically evaluate current limitations in RAG assessment, including the absence of intermediate supervision for multi-step reasoning and practical challenges related to cost-risk trade-offs. To bridge theory and practice, we provide practical guidelines tailored to diverse real-world applications. Finally, we identify promising research directions, such as graph-based knowledge integration, hybrid model collaboration, and RL-driven optimization. Overall, this work presents a theoretical framework and practical foundation to advance RAG systems in academia and industry, fostering the next generation of RAG solutions." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.697, + 0.232, + 0.712 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.717, + 0.483, + 0.869 + ], + "angle": 0, + "content": "Recent breakthroughs in Large Language Models (LLMs) like OpenAI O1 [39] and DeepSeek-R1 [25] have shifted the paradigm from \"pre-training scaling\" to \"test-time scaling\" [63]. Unlike traditional language models that improve via corpus accumulation during pre-training, these models enhance performance in complex tasks—such as mathematical derivation and code generation [29]—through post-training innovations during the inference phase (e.g., Long-CoT thinking [8]). This shift has led to the emergence of \"Large Reasoning Models\" (LRMs) [99] with advanced internal reasoning abilities." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.32, + 0.917, + 0.47 + ], + "angle": 0, + "content": "These advancements have not only boosted basic model capabilities but also opened new avenues for application technologies like Retrieval-Augmented Generation (RAG) [21]. 
Serving as a key link between language models and external knowledge, RAG overcomes traditional LLMs' limits in knowledge freshness, domain specificity, and factual accuracy by retrieving real-time non-parametric information and integrating it into the context. This enhances information processing and reduces hallucination risks in knowledge-intensive tasks." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.471, + 0.916, + 0.578 + ], + "angle": 0, + "content": "Technological evolution is advancing RAG architectures through innovations like query rewriting [61], re-ranking [1], and hybrid retrieval [88], creating an Advanced RAG paradigm focused on pre-retrieval optimization and post-retrieval refinement. Modular RAG [22] further breaks down these systems into component-based, service-oriented architectures, using orchestration to tackle practical challenges." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.578, + 0.915, + 0.683 + ], + "angle": 0, + "content": "Despite improvements in query intent recognition and knowledge use, challenges of RAG remain in demanding tasks like deep research and complex decision-making. Key issues include: 1) difficulty capturing intent from ambiguous queries; 2) poor logical coherence in multi-hop reasoning; 3) efficiency limits of traditional retrieval in open domains; and 4) degraded generation quality from noisy retrieved data." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.683, + 0.916, + 0.895 + ], + "angle": 0, + "content": "Models like DeepSeek-R1, with strong reasoning capabilities, inspire new directions for RAG systems. As shown in Figure 1, recent research explores integrating formal reasoning frameworks with knowledge retrieval. This approach optimizes retrieval through logic-driven query reformulation and uses reasoning to analyze and validate retrieved knowledge, creating cognitive synergy between retrieval and generation. 
This paradigm aims to overcome conventional limitations, enabling intelligent systems with rigorous logic and reliable knowledge use. From a trend perspective, an increasing number of methods combine reasoning and retrieval abilities through reinforcement learning (RL), marking a new direction in the LRM era. Meanwhile, prompt-based approaches continue to rapidly evolve, with researchers aiming" + }, + { + "type": "page_footnote", + "bbox": [ + 0.084, + 0.894, + 0.214, + 0.907 + ], + "angle": 0, + "content": "*Corresponding Author" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.347, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.858, + 0.06, + 0.912, + 0.071 + ], + "angle": 0, + "content": "Gao et al." + }, + { + "type": "image", + "bbox": [ + 0.087, + 0.089, + 0.913, + 0.423 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.435, + 0.914, + 0.511 + ], + "angle": 0, + "content": "Figure 1. Timeline of studies on RAG-reasoning synergy. From a technical perspective, the approaches can be categorized into Prompt-Based, Tuning-Based, and RL-Based methods. A notable trend is the increasing use of Reinforcement Learning to enhance RAG systems, particularly following the prosperity of test-time scaling. Meanwhile, Prompt-Based and Tuning-Based methods continue to evolve in parallel, demonstrating that there are multiple pathways to integrating reasoning capabilities into RAG systems." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.531, + 0.483, + 0.591 + ], + "angle": 0, + "content": "to achieve results through workflow design while keeping model parameters frozen. Notably, sole reliance on tuning methods is steadily decreasing, suggesting limited improvements from additional fine-tuning at this stage." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.591, + 0.483, + 0.68 + ], + "angle": 0, + "content": "Traditional RAG is limited by its unidirectional flow (retrieval \\(\\rightarrow\\) generation). Integrating reasoning capabilities grants the system greater autonomy, unlocking new possibilities. As shown in Figure 2, this integration is poised to drive major breakthroughs, enabling practical use in complex real-world scenarios." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.682, + 0.483, + 0.879 + ], + "angle": 0, + "content": "1) From Ambiguous Semantic Matching to Logic-Driven Targeted Retrieval. Traditional RAG relies on semantic similarity for retrieval; however, it is sensitive to phrasing variations. Advanced reasoning allows deep logical analysis of queries (e.g., causal links, conditional constraints) to dynamically refine retrieval strategies [24]. For example, to answer \"How to reduce postoperative infection risks in diabetes patients?\", the system prioritizes retrieving \"blood glucose control thresholds\" and \"antibiotic usage guidelines\" over simply matching \"diabetes postoperative care\". This approach supports multi-hop retrieval by breaking down complex queries into sequential sub-queries while preserving cross-document coherence through reasoning chains." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.531, + 0.915, + 0.682 + ], + "angle": 0, + "content": "2) From Simple Information Aggregation to Logically Coherent Context Construction. Current RAG systems input all retrieved document chunks into context directly, often causing fragmented or contradictory information that confuses LLMs. Reasoning-enhanced systems integrate evidence chains by logically verifying and inferring causality in retrieved content, filtering conflicts and forming coherent explanations [100]. They also use dynamic knowledge completion to detect missing logical links, prompting iterative retrieval or inference to fill gaps [51]." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.682, + 0.915, + 0.833 + ], + "angle": 0, + "content": "3) From Simple and Single-Turn QA to Systemic Decision Support. Traditional RAG performs well in factual QA [65] but struggles with multi-step and complex decision-making. Reasoning-integrated systems produce structured reasoning output, enhancing multi-objective optimization to balance retrieval breadth and solution feasibility under various constraints. For example, multiple constraints under different conditions in engineering construction plans [54], and the formulation of diagnosis and treatment plans for various diseases in the medical field [105]." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.833, + 0.915, + 0.895 + ], + "angle": 0, + "content": "4) From Indiscriminate Retrieval to Intelligent Resource Allocation. Traditional RAG retrieves documents for all queries, regardless of complexity. Reasoning-enhanced systems use on-demand retrieval, handling simple queries" + }, + { + "type": "list", + "bbox": [ + 0.513, + 0.531, + 0.915, + 0.895 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.382, + 0.073 + ], + "angle": 0, + "content": "Synergizing RAG and Reasoning: A Systematic Review" + }, + { + "type": "header", + "bbox": [ + 0.653, + 0.06, + 0.914, + 0.073 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.089, + 0.916, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.303, + 0.378, + 0.695, + 0.394 + ], + "angle": 0, + "content": "Figure 2. Advantages of Combining RAG with Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.413, + 0.483, + 0.474 + ], + "angle": 0, + "content": "with direct generation and complex ones with multi-round retrieval to reduce latency [20]. 
Dynamic retrieval pruning uses pre-reasoning predictions to target key information, minimizing unnecessary document and graph traversal [41]." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.474, + 0.483, + 0.61 + ], + "angle": 0, + "content": "5) From Passive Knowledge Tool to Proactive Cognitive Assistant. Advancing beyond reactive knowledge retrieval, reasoning-enhanced systems can proactively serve users by asking clarifying questions and anticipating implicit needs. This shift enables human-like assistants that integrate memory, reasoning, and decision-making, proving especially valuable for complex tasks such as deep research [43], business analytics [50], personal assistant [107] and urban planning [85]." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.61, + 0.483, + 0.729 + ], + "angle": 0, + "content": "However, the synergistic pathway between RAG and reasoning requires more than simply replacing conventional generative LLMs with LRM modules. It necessitates deep integration of technological evolution insights from LRM - achieved through reconstructing knowledge retrieval mechanisms and strengthening reasoning-generation collaborative linkages - to enable system-level enhancement of cognitive capabilities within the RAG architecture." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.73, + 0.483, + 0.866 + ], + "angle": 0, + "content": "Therefore, this paper aims to address the pivotal and forward-looking research question of \"how RAG systems can synergize with reasoning capabilities\". We systematically review current studies after 2024 while establishing explicit definitions for reasoning within RAG contexts. Building on this foundation, we provide an in-depth taxonomy and analysis of the objectives, typical patterns, and implementations underlying RAG-reasoning integration, clarifying key technological trajectories and critical breakthroughs." 
+ }, + { + "type": "text", + "bbox": [ + 0.084, + 0.866, + 0.483, + 0.897 + ], + "angle": 0, + "content": "As RAG technology enters its next developmental phase, downstream task complexity has escalated significantly -" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.413, + 0.916, + 0.579 + ], + "angle": 0, + "content": "particularly evident in emerging challenges like Deep Research [106]. These advanced applications not only demand enhanced reasoning capacities but also drive RAG's expansion into multimodal, cross-domain, and dynamic environments. However, while the integration of reasoning capabilities demonstrably improves complex task performance, existing research frequently overlooks associated computational overheads and potential risks. Through systematic examination of these operational constraints and analysis of industry applications, we propose practical guidelines for multiple real-world scenarios with diverse requirements." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.58, + 0.915, + 0.685 + ], + "angle": 0, + "content": "Finally, we outline future research directions grounded in current technological evolution, including: 1) RAG-graph architecture integration, 2) coordinated multimodal reasoning frameworks, 3) hybrid model collaboration, and 4) RL optimization specifically designed for RAG systems. This work establishes both theoretical foundations and practical roadmaps for subsequent research in this evolving field." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.686, + 0.913, + 0.713 + ], + "angle": 0, + "content": "The contributions of this paper can be summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.726, + 0.915, + 0.8 + ], + "angle": 0, + "content": "- Pioneering Review. This work represents the first comprehensive survey focusing on the integration of RAG with reasoning, offering novel insights and forward-looking guidance for advancing this emerging research frontier." 
+ }, + { + "type": "text", + "bbox": [ + 0.541, + 0.801, + 0.928, + 0.875 + ], + "angle": 0, + "content": "- Systematic Taxonomy. We present a multi-dimensional framework to systematically examine the objectives, paradigms, and methodologies for combining RAG with reasoning capabilities, establishing clear classification criteria across technical dimensions." + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.877, + 0.915, + 0.907 + ], + "angle": 0, + "content": "- Practical Guidance. Beyond theoretical exploration, we critically discuss the additional cost and potential" + }, + { + "type": "list", + "bbox": [ + 0.541, + 0.726, + 0.928, + 0.907 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.347, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.858, + 0.06, + 0.912, + 0.071 + ], + "angle": 0, + "content": "Gao et al." + }, + { + "type": "text", + "bbox": [ + 0.123, + 0.092, + 0.483, + 0.135 + ], + "angle": 0, + "content": "risks associated with the introduction of reasoning, accompanied by an actionable Practical Guide for real-world scenarios." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.137, + 0.483, + 0.197 + ], + "angle": 0, + "content": "- Open Resource Platform. Through the OpenRAG platform, we provide a rich, multi-dimensional review of related work, which allows readers to quickly search and compare different methods." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.212, + 0.203, + 0.227 + ], + "angle": 0, + "content": "2 Overview" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.232, + 0.483, + 0.324 + ], + "angle": 0, + "content": "This chapter establishes a conceptual framework for the paper along two key dimensions. 
First, it formally defines \"reasoning\" and distinguishes it from \"inference.\" Second, it organizes a taxonomy of synergy mechanisms between \"RAG and Reasoning.\" To construct a clear cognitive pathway, we address three progressive research questions:" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.328, + 0.388, + 0.342 + ], + "angle": 0, + "content": "- Why synergize RAG and reasoning?" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.343, + 0.481, + 0.358 + ], + "angle": 0, + "content": "- What are their typical collaboration paradigms?" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.358, + 0.397, + 0.373 + ], + "angle": 0, + "content": "- How can this integration be realized?" + }, + { + "type": "list", + "bbox": [ + 0.11, + 0.328, + 0.481, + 0.373 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.387, + 0.203, + 0.4 + ], + "angle": 0, + "content": "2.1 Definition" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.406, + 0.492, + 0.616 + ], + "angle": 0, + "content": "The definition of reasoning in modern AI systems remains an evolving construct, particularly within the context of LRMs exemplified by DeepSeek R1 and OpenAI O1. Here, under the scope of LLMs, we formalize reasoning as a structured, multi-step process that dynamically decomposes complex problems, generates intermediate hypotheses, and iteratively refines solutions through logical and evidence-based transformations. 
Mathematically, let a reasoning process \\(\\mathcal{R}\\) be defined as a tuple \\(\\langle \\mathcal{K}_p, \\mathcal{K}_r, S_t, \\Phi \\rangle\\), where \\(\\mathcal{K}_p\\) denotes parametric knowledge embeddings, \\(\\mathcal{K}_r\\) represents retrieved contextual knowledge, \\(S_t = \\{s_0, s_1, \\ldots, s_n\\}\\) constitutes the evolving state sequence with \\(s_0\\) as the initial query and \\(s_n\\) as the final response, and \\(\\Phi : S_i \\times \\mathcal{K}_p \\times \\mathcal{K}_r \\to S_{i+1}\\) defines the state transition function." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.617, + 0.483, + 0.875 + ], + "angle": 0, + "content": "The reasoning process exhibits three defining characteristics. First, it is inherently multi-step, systematically decomposing complex problems into intermediate cognitive states (e.g., sub-question generation or temporary conclusions) rather than pursuing direct input-output mapping. Second, it generates novel knowledge or facts – synthesizing implicit relationships, deriving latent constraints, or reformulating problems in ways not explicitly present in the initial input or parametric memory (e.g., transforming \"Is A greater than B?\" into comparative subquestions about A and B's attributes). Crucially, these representations are not merely retrieved but dynamically constructed through the reasoning trajectory. Third, the process is teleological – its architecture and termination conditions are explicitly optimized for complex problem resolution, where complexity is measured by the necessity of state transitions or the insufficiency of direct retrieval from either parametric \\((\\mathcal{K}_p)\\)" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.915, + 0.137 + ], + "angle": 0, + "content": "or external \\((\\mathcal{K}_r)\\) knowledge sources. This stands in stark contrast to atomic inference, which lacks such deliberate state construction and goal-aware iteration." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.137, + 0.915, + 0.364 + ], + "angle": 0, + "content": "The distinction between reasoning and inference manifests most saliently in their computational signatures. While inference \\(\\mathcal{I}\\) constitutes a single-step conditional probability computation \\(P(y|x) = \\prod_{t=1}^{T} P(y_t|x, y_{<t})\\) to steer model behavior, tuning-based methods that inject domain-specific knowledge or distill reasoning capability, and RL-based frameworks that optimize retrieval-reasoning policies through outcome reward models (ORM) or process reward models (PRM). The alignment between these" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.086, + 0.06, + 0.347, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.859, + 0.06, + 0.912, + 0.071 + ], + "angle": 0, + "content": "Gao et al." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.092, + 0.483, + 0.168 + ], + "angle": 0, + "content": "methodologies and the proposed taxonomy is critical—static workflows predominantly rely on predictable prompt-guided reasoning chains, whereas dynamic systems increasingly integrate search-based exploration or solver-augmented strategies to navigate evolving state spaces." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.168, + 0.483, + 0.274 + ], + "angle": 0, + "content": "Overall, this tripartite taxonomy—motivational drivers, architectural paradigms, and implementation methodologies—establishes a unified lens for analyzing RAG+Reasoning systems. Subsequent chapters will elaborate on each stratum, progressively revealing how these conceptual distinctions translate into technical innovations that push the boundaries of machine intelligence." 
+ }, + { + "type": "title", + "bbox": [ + 0.086, + 0.308, + 0.36, + 0.325 + ], + "angle": 0, + "content": "3 The purpose of the synergy" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.327, + 0.483, + 0.477 + ], + "angle": 0, + "content": "The integration of RAG and reasoning marks a crucial advancement in enhancing LLMs' problem-solving abilities. Their true potential lies not in isolated use but in their synergy, which overcomes key limitations in retrieval and reasoning. This section explains the main motivations for combining RAG with reasoning, emphasizing two primary benefits: (1) enhancing retrieval accuracy and flexibility through reasoning, and (2) reinforcing complex reasoning by using context-rich retrieved knowledge. Figure 4 illustrates these collaborative aims and the limitations they address." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.478, + 0.483, + 0.614 + ], + "angle": 0, + "content": "The first key benefit is Reasoning-Augmented Retrieval where reasoning improves the retrieval process. Traditional RAG systems struggle with query formulation, relevance assessment, and iterative refinement—tasks needing logical and contextual analysis. Reasoning enables adaptive retrieval through dynamic query expansion, ambiguity resolution, and multi-hop evidence aggregation, overcoming the limits of keyword- or embedding-based methods and aligning retrieval with the task's reasoning demands." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.614, + 0.498, + 0.734 + ], + "angle": 0, + "content": "The second benefit is Retrieval-Augmented Reasoning, where external knowledge supplements the limitations of purely parametric LLM reasoning. Even advanced models face hallucination, knowledge gaps, and compositional challenges alone. Retrieval grounds reasoning in up-to-date, domain-specific, or rare information absent from model weights, crucial for explainability, multi-step deduction, and integrating diverse sources." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.735, + 0.483, + 0.795 + ], + "angle": 0, + "content": "Together, combining RAG and reasoning fills fundamental gaps in both techniques. By enhancing retrieval via reasoning and strengthening reasoning through retrieval, it broadens LLMs' capacity to address complex real-world problems." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.829, + 0.365, + 0.843 + ], + "angle": 0, + "content": "3.1 Reasoning-Augmented Retrieval" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.847, + 0.482, + 0.907 + ], + "angle": 0, + "content": "Reasoning-Augmented Retrieval (RAR) represents a significant advancement in information retrieval by integrating multi-step reasoning to dynamically enhance retrieval quality. Unlike traditional methods that depend on static semantic" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.092, + 0.913, + 0.137 + ], + "angle": 0, + "content": "matching, RAR creates a cognitive feedback loop mimicking human iterative reasoning, surpassing the limitations of simple \"query-document\" interactions." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.137, + 0.915, + 0.394 + ], + "angle": 0, + "content": "RAR's effectiveness stems from several key features. It often uses on-demand retrieval, where reasoning—evaluating intent clarity, knowledge state, and temporal factors—guides adaptive search initiation, reducing redundancies present in fixed triggers (e.g., UAR's classifier [14]). It improves semantic alignment by inferring implicit query logic such as business rules or entity relationships to generate precise retrieval requests aligned with data schemas (e.g., PlanRAG's plan-retrieval loops [48]). RAR also applies multi-step iterative refinement, using intermediate reasoning outputs (e.g., chain-of-thought, partial answers [78]) to recursively reformulate queries in a closed-loop system essential for resolving multi-hop dependencies [68]. 
Furthermore, it adapts to specific domains by tailoring retrieval to vertical contexts (e.g., financial or medical) and balances efficiency and precision through lightweight reasoning strategies (e.g., AdaptiveRAG's complexity-based selection [41])." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.395, + 0.913, + 0.483 + ], + "angle": 0, + "content": "Traditional retrieval systems, effective for simple queries, struggle with complex information needs due to rigid designs favoring static matching over dynamic reasoning, limiting their adaptability to changing contexts and diverse data. RAR primarily addresses five core challenges inherent in these conventional methods." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.492, + 0.915, + 0.779 + ], + "angle": 0, + "content": "3.1.1 Semantic Disparities Between Queries and Documents. A key challenge lies in the mismatch between user queries and documents—whether due to differing expression styles (professional jargon vs. casual language) or implicit contextual gaps—making direct semantic matching unreliable. Importantly, high similarity does not guarantee true relevance, as documents may share keywords or surface features without addressing the underlying intent or logic of the query. Retrieval models must therefore understand deeper semantics beyond superficial similarity. Domain adaptation further complicates this issue. To overcome these gaps, approaches such as reasoning-augmented embeddings (O1-Embedder [101] enriches queries with inferred \"thinking\" text), feedback-driven rewriting (SmartRAG [20] dynamically refines queries based on retrieved results), and preplanning (PlanRAG [48] extracts business rules to generate SQL queries aligned with database schemas) help better capture domain-specific semantics and ensure relevance beyond mere similarity." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.786, + 0.913, + 0.907 + ], + "angle": 0, + "content": "3.1.2 Inflexible Intent Disambiguation. 
Traditional RAG methods rely on fixed embedding similarity strategies, which fail to dynamically interpret the implicit intent behind complex queries (e.g., multi-hop reasoning or domain-specific requirements). User queries often exhibit semantic complexity that far exceeds their surface text—for instance, a request to \"optimize supply chain costs\" may require correlating disparate database fields not explicitly" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.382, + 0.073 + ], + "angle": 0, + "content": "Synergizing RAG and Reasoning: A Systematic Review" + }, + { + "type": "header", + "bbox": [ + 0.653, + 0.06, + 0.914, + 0.073 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "image", + "bbox": [ + 0.099, + 0.105, + 0.13, + 0.14 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.132, + 0.114, + 0.251, + 0.14 + ], + "angle": 0, + "content": "Core Limitations in RAG" + }, + { + "type": "title", + "bbox": [ + 0.315, + 0.117, + 0.454, + 0.129 + ], + "angle": 0, + "content": "Semantic Disparities" + }, + { + "type": "text", + "bbox": [ + 0.302, + 0.135, + 0.438, + 0.144 + ], + "angle": 0, + "content": "Lexical and contextual disparities" + }, + { + "type": "text", + "bbox": [ + 0.302, + 0.145, + 0.468, + 0.153 + ], + "angle": 0, + "content": "(e.g., terminology mismatch, implicit" + }, + { + "type": "text", + "bbox": [ + 0.302, + 0.154, + 0.376, + 0.162 + ], + "angle": 0, + "content": "context absence)" + }, + { + "type": "text", + "bbox": [ + 0.305, + 0.165, + 0.465, + 0.173 + ], + "angle": 0, + "content": "Failure of semantic similarity matching" + }, + { + "type": "title", + "bbox": [ + 0.546, + 0.117, + 0.66, + 0.129 + ], + "angle": 0, + "content": "Knowledge Gaps" + }, + { + "type": "text", + "bbox": [ + 0.522, + 0.133, + 0.684, + 0.14 + ], + "angle": 0, + "content": "Long-range reasoning tasks (e.g., multi-" + }, + { + "type": "text", + "bbox": [ + 0.522, + 
0.141, + 0.556, + 0.149 + ], + "angle": 0, + "content": "hop QA)" + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.153, + 0.686, + 0.16 + ], + "angle": 0, + "content": "Requiring logical integration across" + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.161, + 0.645, + 0.168 + ], + "angle": 0, + "content": "multiple knowledge segments" + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.171, + 0.686, + 0.178 + ], + "angle": 0, + "content": "Absence of intermediate knowledge" + }, + { + "type": "text", + "bbox": [ + 0.521, + 0.179, + 0.686, + 0.186 + ], + "angle": 0, + "content": "leads to reasoning chain fragmentation" + }, + { + "type": "title", + "bbox": [ + 0.746, + 0.115, + 0.866, + 0.127 + ], + "angle": 0, + "content": "Core Limitations" + }, + { + "type": "title", + "bbox": [ + 0.758, + 0.129, + 0.852, + 0.142 + ], + "angle": 0, + "content": "in Reasoning" + }, + { + "type": "image", + "bbox": [ + 0.866, + 0.11, + 0.897, + 0.141 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.184, + 0.187, + 0.289, + 0.209 + ], + "angle": 0, + "content": "Inflexible Intent Disambiguation" + }, + { + "type": "text", + "bbox": [ + 0.154, + 0.215, + 0.319, + 0.222 + ], + "angle": 0, + "content": "Failure to resolve implicit intents in" + }, + { + "type": "text", + "bbox": [ + 0.154, + 0.223, + 0.225, + 0.23 + ], + "angle": 0, + "content": "complex queries" + }, + { + "type": "text", + "bbox": [ + 0.154, + 0.233, + 0.318, + 0.24 + ], + "angle": 0, + "content": "(e.g., multi-hop reasoning, domain-" + }, + { + "type": "text", + "bbox": [ + 0.154, + 0.241, + 0.247, + 0.248 + ], + "angle": 0, + "content": "specific requirements)" + }, + { + "type": "text", + "bbox": [ + 0.154, + 0.251, + 0.318, + 0.258 + ], + "angle": 0, + "content": "The semantic complexity of user queries" + }, + { + "type": "text", + "bbox": [ + 0.154, + 0.259, + 0.286, + 0.266 + ], + "angle": 0, + "content": "may far exceed their surface text" + }, + { + "type": "title", + 
"bbox": [ + 0.104, + 0.283, + 0.335, + 0.294 + ], + "angle": 0, + "content": "Heterogeneous Data Collaboration" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.302, + 0.259, + 0.309 + ], + "angle": 0, + "content": "Schema-disparate data sources" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.312, + 0.315, + 0.328 + ], + "angle": 0, + "content": "(e.g., structured records vs. unstructured passages)" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.331, + 0.315, + 0.339 + ], + "angle": 0, + "content": "Requires cross-modal retrieval and alignment" + }, + { + "type": "title", + "bbox": [ + 0.178, + 0.355, + 0.332, + 0.367 + ], + "angle": 0, + "content": "Efficiency vs. Precision" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.373, + 0.333, + 0.381 + ], + "angle": 0, + "content": "Comprehensive Retrieval \\(\\rightarrow\\) Overhead" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.385, + 0.333, + 0.393 + ], + "angle": 0, + "content": "Restricted Retrieval \\(\\rightarrow\\) Critical info loss" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.398, + 0.321, + 0.406 + ], + "angle": 0, + "content": "Iterations \\(\\uparrow \\rightarrow\\) Computational costs" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.41, + 0.33, + 0.418 + ], + "angle": 0, + "content": "Lack of dynamic trade-off mechanism" + }, + { + "type": "title", + "bbox": [ + 0.342, + 0.205, + 0.597, + 0.245 + ], + "angle": 0, + "content": "Reasoning Augmented Retrieval" + }, + { + "type": "title", + "bbox": [ + 0.474, + 0.272, + 0.525, + 0.289 + ], + "angle": 0, + "content": "RAG" + }, + { + "type": "title", + "bbox": [ + 0.439, + 0.326, + 0.553, + 0.345 + ], + "angle": 0, + "content": "Reasoning" + }, + { + "type": "title", + "bbox": [ + 0.496, + 0.361, + 0.593, + 0.377 + ], + "angle": 0, + "content": "Retrieval" + }, + { + "type": "title", + "bbox": [ + 0.423, + 0.381, + 0.662, + 0.4 + ], + "angle": 0, + "content": "Augmented Reasoning" + }, + { + "type": "title", + "bbox": [ + 
0.486, + 0.433, + 0.646, + 0.456 + ], + "angle": 0, + "content": "Search Space Explosion & Local Optima Traps" + }, + { + "type": "text", + "bbox": [ + 0.484, + 0.461, + 0.648, + 0.476 + ], + "angle": 0, + "content": "Search space grows exponentially with reasoning steps" + }, + { + "type": "text", + "bbox": [ + 0.484, + 0.479, + 0.648, + 0.494 + ], + "angle": 0, + "content": "Traditional multi-step reasoning methods lack external knowledge constraints" + }, + { + "type": "text", + "bbox": [ + 0.484, + 0.498, + 0.648, + 0.505 + ], + "angle": 0, + "content": "Lead to invalid hypotheses, local optima" + }, + { + "type": "text", + "bbox": [ + 0.484, + 0.506, + 0.614, + 0.512 + ], + "angle": 0, + "content": "traps, or logical inconsistencies" + }, + { + "type": "title", + "bbox": [ + 0.669, + 0.197, + 0.854, + 0.209 + ], + "angle": 0, + "content": "Domain Knowledge Boundary" + }, + { + "type": "text", + "bbox": [ + 0.678, + 0.215, + 0.843, + 0.232 + ], + "angle": 0, + "content": "Pre-trained models exhibit constrained knowledge coverage" + }, + { + "type": "text", + "bbox": [ + 0.678, + 0.235, + 0.841, + 0.25 + ], + "angle": 0, + "content": "Struggle with tasks requiring domain-specific expertise" + }, + { + "type": "text", + "bbox": [ + 0.678, + 0.252, + 0.797, + 0.26 + ], + "angle": 0, + "content": "(e.g., semiconductor design)" + }, + { + "type": "text", + "bbox": [ + 0.679, + 0.263, + 0.843, + 0.279 + ], + "angle": 0, + "content": "Processing tasks requiring real-time information is challenging" + }, + { + "type": "title", + "bbox": [ + 0.726, + 0.29, + 0.859, + 0.312 + ], + "angle": 0, + "content": "Dynamic Knowledge Requirements" + }, + { + "type": "text", + "bbox": [ + 0.695, + 0.318, + 0.89, + 0.326 + ], + "angle": 0, + "content": "Progressively evolving knowledge requirements" + }, + { + "type": "text", + "bbox": [ + 0.698, + 0.331, + 0.888, + 0.345 + ], + "angle": 0, + "content": "Initial retrieval results are irrelevant or redundant to subsequent reasoning 
steps" + }, + { + "type": "text", + "bbox": [ + 0.698, + 0.348, + 0.888, + 0.364 + ], + "angle": 0, + "content": "Dynamically evolving information needs in complex reasoning tasks" + }, + { + "type": "text", + "bbox": [ + 0.698, + 0.367, + 0.887, + 0.382 + ], + "angle": 0, + "content": "Fixed retrieval strategies struggle to achieve real-time matching" + }, + { + "type": "title", + "bbox": [ + 0.67, + 0.397, + 0.858, + 0.408 + ], + "angle": 0, + "content": "Insufficient Depth & Breadth" + }, + { + "type": "text", + "bbox": [ + 0.682, + 0.416, + 0.848, + 0.423 + ], + "angle": 0, + "content": "The inherent static knowledge of LLMs" + }, + { + "type": "text", + "bbox": [ + 0.682, + 0.427, + 0.847, + 0.442 + ], + "angle": 0, + "content": "Challenge of covering dynamically evolving domain knowledge boundaries" + }, + { + "type": "text", + "bbox": [ + 0.682, + 0.448, + 0.847, + 0.463 + ], + "angle": 0, + "content": "The reasoning chains frequently terminate at superficial associations" + }, + { + "type": "text", + "bbox": [ + 0.682, + 0.465, + 0.845, + 0.48 + ], + "angle": 0, + "content": "The inability to establish cross-domain, multi-level knowledge connections" + }, + { + "type": "image_caption", + "bbox": [ + 0.273, + 0.551, + 0.724, + 0.567 + ], + "angle": 0, + "content": "Figure 4. The purpose of the synergy between RAG and reasoning" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.587, + 0.484, + 0.814 + ], + "angle": 0, + "content": "mentioned. Static retrieval methods lack the adaptability to capture such dynamically evolving information needs. A critical limitation lies in intent dynamicity: as contextual understanding expands, traditional systems generate fixed retrieval results based solely on the initial query. Furthermore, semantic representation limitations of dense retrieval models (e.g., BERT-based models) hinder their ability to encode intricate semantic relationships (e.g., irony, metaphors), leading to misaligned results. 
Current approaches attempt to mitigate these issues through multi-step intent decomposition (e.g., LevelRAG's high-level searcher breaks complex queries into multi-hop sub-queries [103]) and dynamic query reformulation (e.g., LeReT's reinforcement learning generates diversified query candidates [34]), iteratively refining retrieval strategies to align with document content." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.832, + 0.484, + 0.907 + ], + "angle": 0, + "content": "3.1.3 Inefficient Coordination of Multi-Source Heterogeneous Data. Retrieval from diverse sources—text, tables, graphs, web, and APIs—often produces fragmented results due to a lack of global reasoning. The key challenge is modal heterogeneity: different retrieval techniques (dense" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.587, + 0.93, + 0.86 + ], + "angle": 0, + "content": "retrieval for text, SQL for tables, GQL for graphs) operate independently without unified coordination. For example, experiments show standard RAG methods (like dense retrieval with query decomposition) yield only \\(32.7\\%\\) perfect recall and \\(40.9\\%\\) F1 on the OTT-QA dataset. These outcomes reveal the limitations of traditional approaches in aligning textual queries with structured tables—such as failing to link concepts like \"K-12 student free rates\" in text to related \"education expenditure\" columns when not explicitly mentioned. Additionally, disconnected entity matching (e.g., relating \"company revenue\" in text to financial tables) worsens inefficiencies, as conventional methods depend on semantic similarity and overlook domain-specific relationships and exact-value matches. Advanced techniques—such as reasoning-driven alignment (ARM's N-gram constraints for cross-modal entity decoding [7]) and unified semantic spaces (LevelRAG's shared multi-modal representations [103])—enable more effective, integrated retrieval." 
+ }, + { + "type": "text", + "bbox": [ + 0.514, + 0.877, + 0.914, + 0.907 + ], + "angle": 0, + "content": "3.1.4 Incompleteness and Incoherence in Complex Retrieval Tasks. Single-step retrieval systems fall short in" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.086, + 0.06, + 0.347, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.859, + 0.06, + 0.912, + 0.071 + ], + "angle": 0, + "content": "Gao et al." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.092, + 0.483, + 0.258 + ], + "angle": 0, + "content": "complex multi-hop reasoning tasks, such as deducing entity chains or conducting decision analysis. Traditional static retrieval conflicts with multi-step cognitive needs, resulting in three main issues: 1) Path dependency, where later retrievals rely on information from earlier steps (e.g., finding \"the most populous county in California\" before its education policies), but conventional systems lack state management; 2) Error propagation, early retrieval errors cause mistakes in intermediate results, which then affect the next round of retrieval; 3) Semantic inflexibility of fixed queries, which cannot adapt to dynamic concepts like entity aliases or relational predicates." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.258, + 0.493, + 0.365 + ], + "angle": 0, + "content": "Advanced methods address these flaws through integrated strategies. PlanRAG uses iterative \"plan-retrospect-replan\" cycles to trigger sub-queries when gaps arise. Reinforcement learning in LeReT improves query generation via reward-driven path selection. Likewise, ITER-RETGEN rebuilds follow-up queries using intermediate answers (e.g., \"award recipient's height\") to resolve multi-hop dependencies." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.383, + 0.483, + 0.563 + ], + "angle": 0, + "content": "3.1.5 Trade-offs Between Retrieval Efficiency and Precision. 
Complex scenarios face a tension between exhaustive retrieval, which is computationally costly, and restricted retrieval, which risks information loss. Expanding retrieval blindly inflates costs (e.g., LLM API calls) without ensuring relevance. Simple queries suffer from unnecessary multi-step retrieval, wasting resources, while complex queries face quality risks if retrieval is too limited. Adaptive approaches like complexity-aware routing (Adaptive-RAG's lightweight classifier allocates retrieval budgets [41]) and cost-sensitive training (SmartRAG's reinforcement learning balances quality and steps [20]) dynamically manage this trade-off." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.564, + 0.483, + 0.7 + ], + "angle": 0, + "content": "In summary, Reasoning-Augmented Retrieval overcomes traditional RAG's limitations in dynamic triggering, semantic alignment, multi-hop support, domain adaptation, and efficiency trade-offs by deeply integrating reasoning into the retrieval process. Its key innovation is a bidirectional enhancement between reasoning and retrieval—reasoning refines retrieval strategies, while retrieval supports iterative reasoning—jointly boosting accuracy and efficiency in complex information tasks." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.722, + 0.367, + 0.737 + ], + "angle": 0, + "content": "3.2 Retrieval-Augmented Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.741, + 0.483, + 0.907 + ], + "angle": 0, + "content": "Retrieval-Augmented Reasoning (ReAR) combines external knowledge retrieval with inherent model reasoning to overcome failures from knowledge gaps or logical discontinuities in complex tasks. Unlike traditional RAG methods that retrieve information once, ReAR uses an iterative, context-sensitive retrieval that continuously provides relevant data to support multi-step reasoning. 
This approach is crucial for tasks needing strict logic, such as mathematical proofs, where intermediate steps require specific theorems or lemmas. By making retrieval an adaptive, ongoing process rather than a one-time step, ReAR strengthens each reasoning stage" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.092, + 0.912, + 0.121 + ], + "angle": 0, + "content": "with accurate, current information, improving the overall inference's reliability and robustness." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.122, + 0.915, + 0.333 + ], + "angle": 0, + "content": "ReAR's core feature is dynamic knowledge supplementation, generating retrieval queries in real-time based on the evolving reasoning context. This overcomes the limits of single-round retrieval by enabling knowledge refinement at each step, as seen in process supervision frameworks like RAG-Gym [96]. ReAR also improves reasoning paths using methods like search space compression—for example, MCTS-guided heuristics in KBQA—and structured feedback from diverse sources like knowledge graphs [97]. These techniques maintain logical consistency while reducing irrelevant or conflicting information. Importantly, ReAR adapts well across domains, supporting precise knowledge retrieval and tool use for specialized tasks such as industrial problem-solving in PIKE [82] or scientific reasoning [106]." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.334, + 0.915, + 0.455 + ], + "angle": 0, + "content": "By integrating retrieval as an active part of the reasoning loop, ReAR addresses LLMs' temporal and depth constraints, ensuring adherence to domain-specific and time-sensitive requirements. This close coupling turns external knowledge into an on-demand resource, creating a closed-loop system that enhances the model's ability to handle complex, knowledge-intensive problems. 
Specifically, ReAR seeks to address the following limitations and challenges:" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.462, + 0.921, + 0.703 + ], + "angle": 0, + "content": "3.2.1 Knowledge Gap in Multi-step Reasoning. In long-range reasoning, missing intermediate knowledge often breaks logical chains, especially in industrial and scientific contexts requiring multi-source data integration (e.g., text, tables, time-series). Static retrieval methods worsen this by not adapting to the reasoning process's changing needs. ReAR techniques address this with chained retrieval, as in CoRAG [83], which breaks multi-hop questions into sequential sub-queries (e.g., retrieving \"event causes\" then their \"impacts\"), systematically linking knowledge. Reasoning-state-aware retrieval, used in FLARE [45], predicts future information needs by generating interim prompts (e.g., \"the next step requires discussion of ...\"), enabling dynamic query construction that preserves coherence. Together, these approaches resolve the conflict between discrete retrieval and continuous reasoning." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.711, + 0.915, + 0.845 + ], + "angle": 0, + "content": "3.2.2 Reasoning Discontinuity Caused by Domain Knowledge Boundaries. Reasoning discontinuity arises from LLMs' limited knowledge, struggling with specialized domains (e.g., semiconductor design in PIKE [82]) and real-time data (e.g., medical parameters in Agentic Reasoning [92]). End-to-end models often produce factual errors, while traditional RAG methods fail to retrieve deep professional knowledge due to coarse retrieval, especially with complex data like tables, charts and images." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.847, + 0.915, + 0.907 + ], + "angle": 0, + "content": "ReAR addresses this with two complementary solutions: knowledge atomization and structural organization, as in PIKE's decomposition of documents into fine-grained units and multi-layer knowledge graphs for semantic and logical" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.381, + 0.072 + ], + "angle": 0, + "content": "Synergizing RAG and Reasoning: A Systematic Review" + }, + { + "type": "header", + "bbox": [ + 0.653, + 0.06, + 0.912, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.092, + 0.482, + 0.182 + ], + "angle": 0, + "content": "retrieval; and dynamic tool integration, as in Agentic Reasoning's real-time data acquisition via code execution and API calls to compute critical indicators (e.g., medical FiO2). These innovations overcome the challenges of specialized knowledge depth and timely information relevance that limit conventional methods." + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.198, + 0.483, + 0.212 + ], + "angle": 0, + "content": "3.2.3 Search Space Explosion and Local Optima Traps." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.213, + 0.483, + 0.424 + ], + "angle": 0, + "content": "The main challenge in multi-step reasoning is the exponential growth of the search space, where methods like Chain-of-Thought (CoT) often yield suboptimal or inconsistent results due to unconstrained hypotheses. Traditional approaches like CoT and Tree-of-Thought (ToT) lack external knowledge constraints, causing invalid assumptions, while purely symbolic reasoning falls short in open-domain tasks. 
To address this, two strategies are used: knowledge base-anchored heuristic search (KBQA-O1 [58]), which limits reasoning actions to subgraphs in knowledge graphs, and a retrieval-verification mechanism (Search-o1 [51]) that prunes unsupported reasoning paths using evidence from the knowledge base. Together, these reduce the search space and preserve reasoning coherence." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.439, + 0.483, + 0.772 + ], + "angle": 0, + "content": "3.2.4 Dynamic Knowledge Requirements in Multi-Step Reasoning. Complex multi-step reasoning tasks face the challenge of continuously changing knowledge requirements. This is evident in cases like multi-hop reasoning and engineering planning, where each stage generates new sub-problems (e.g., moving from \"architectural design\" to \"material cost estimation\"). Static knowledge bases or one-time retrieval methods cannot meet this evolving demand. This manifests in two ways: initial knowledge may miss later needs, causing gaps; and fixed knowledge sets may include irrelevant information, reducing reasoning accuracy. To address this, new retrieval-augmented reasoning approaches introduce dynamic solutions: process supervision (e.g., reward models in RAG-Gym [96]) detects knowledge gaps in real time, atomic decision-making (e.g., step decomposition in DeepRAG [24]) triggers retrieval as needed, and tree-like expansions (e.g., multi-path retrieval in DeepSolution [54]) enable parallel exploration. By integrating knowledge retrieval within reasoning, these methods let the system identify, supplement, and verify knowledge dynamically—much like a human expert—greatly enhancing the reliability and completeness of complex reasoning." + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.786, + 0.483, + 0.801 + ], + "angle": 0, + "content": "3.2.5 Insufficient Depth and Breadth of Reasoning." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.802, + 0.483, + 0.907 + ], + "angle": 0, + "content": "This issue is prominent in expert tasks like medical diagnosis, legal analysis, and research report generation. LLMs' static knowledge often fails to capture the evolving scope of domain knowledge, resulting in shallow reasoning that misses multi-level, cross-domain connections. For example, when assessing \"Company A is affected by economic recession,\" traditional methods rely on superficial statistical" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.915, + 0.152 + ], + "angle": 0, + "content": "patterns and cannot systematically follow the deeper logical chain from \"Company A \\(\\rightarrow\\) industry supply chain \\(\\rightarrow\\) macroeconomic policy \\(\\rightarrow\\) international political landscape,\" leading to reasoning that lacks causal depth." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.153, + 0.915, + 0.348 + ], + "angle": 0, + "content": "To overcome this, recent advances use structured, retrieval-enhanced frameworks. ToG2.0 [60] models Knowledge Graph relational paths as retrieval guidance vectors, enabling targeted queries along entity paths, surpassing the limits of keyword-based retrieval. This approach complements CR-Planner's [52] iterative expansion, which triggers retrieval of specialized knowledge (e.g., textbook proofs of algorithm complexity) at critical reasoning points, ensuring accurate domain knowledge integration via multi-round validation. Addressing cross-domain knowledge linkage, CO-STORM [43] employs a multi-agent system whose host module generates cross-modal retrieval commands by analyzing potential semantics in uncited documents." 
+ }, + { + "type": "title", + "bbox": [ + 0.514, + 0.368, + 0.723, + 0.386 + ], + "angle": 0, + "content": "4 Patterns of synergy" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.388, + 0.915, + 0.54 + ], + "angle": 0, + "content": "Section 3 detailed the need and motivation for integrating RAG with reasoning. Building on this, this section presents two core implementation patterns for RAG-reasoning synergy (Figure 5): (1) the Pre-defined Workflow, which uses logical architectures with preset rules for coordination, and (2) Dynamic Workflow, which relies on context-aware, adaptive coordination via real-time decision engines. These patterns illustrate current frameworks combining knowledge retrieval and multi-step reasoning from deterministic and flexible perspectives." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.557, + 0.719, + 0.57 + ], + "angle": 0, + "content": "4.1 Pre-defined workflow" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.576, + 0.915, + 0.786 + ], + "angle": 0, + "content": "Pre-defined workflow is a multi-step reasoning approach with a fixed architecture and sequential execution, emphasizing process clarity and operational determinism. It consists of predefined iterative stages, each with strict input-output rules and no dynamic changes based on intermediate results. This modular design ensures controllability and structured reasoning for complex tasks. All steps are executed regardless of intermediate outcomes, guaranteeing repeatability and stability while avoiding uncertainties from dynamic decisions. Although it sacrifices adaptability, this approach offers procedural predictability and is well-suited for scenarios demanding clear reasoning paths, albeit with possible computational redundancy due to lack of real-time adjustments." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.787, + 0.915, + 0.863 + ], + "angle": 0, + "content": "Mathematically, the pre-defined RAG workflow can be formalized as a deterministic multi-step operational chain. Given an input query \\( Q \\) and a predefined sequence of \\( N \\) reasoning steps and the final decision output \\( D \\), the complete workflow is expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.63, + 0.892, + 0.914, + 0.908 + ], + "angle": 0, + "content": "\\[\nD = f _ {N} \\circ \\dots \\circ f _ {2} \\circ f _ {1} (Q) \\tag {1}\n\\]" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.347, + 0.073 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.858, + 0.06, + 0.912, + 0.072 + ], + "angle": 0, + "content": "Gao et al." + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.089, + 0.913, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.297, + 0.347, + 0.7, + 0.364 + ], + "angle": 0, + "content": "Figure 5. Patterns of Synergy between RAG and Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.383, + 0.483, + 0.519 + ], + "angle": 0, + "content": "where each \\(f_{i}\\in \\{\\Psi ,R,\\Gamma \\}\\) denotes strictly defined functions for reasoning \\((\\Psi)\\), retrieval \\((R)\\), or decision-making \\((\\Gamma)\\), with \\(\\circ\\) representing function composition. This formulation adheres to the fixed mapping sequence \\(Q\\mapsto \\Psi (Q)\\mapsto R(\\Psi (Q))\\mapsto \\Gamma (R(\\Psi (Q)))\\), exhibiting Markovian properties where \\(f_{t + 1}\\) depends solely on \\(f_{t}\\)'s output while remaining independent of historical states \\(\\{f_{< t}\\}\\). The chained composition guarantees process closure and reproducibility, though constrained by the static combinatorial nature of \\(\\{f_i\\}_{i = 1}^N\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.519, + 0.483, + 0.564 + ], + "angle": 0, + "content": "In the pre-defined pipeline, based on the position where reasoning is introduced, it can be further divided into Pre-Retrieval, Post-Retrieval, and Hybrid." + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.573, + 0.483, + 0.603 + ], + "angle": 0, + "content": "4.1.1 Pre-Retrieval Reasoning. For pre-retrieval methods, the sequence is explicitly defined as" + }, + { + "type": "equation", + "bbox": [ + 0.22, + 0.612, + 0.482, + 0.627 + ], + "angle": 0, + "content": "\\[\nD = \\Gamma \\circ \\mathcal {R} \\circ \\Psi (Q) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.635, + 0.483, + 0.725 + ], + "angle": 0, + "content": "where \\(\\Psi\\) denotes a reasoning operator that systematically transforms or enriches the query prior to retrieval. This paradigm enhances retrieval precision by resolving ambiguities, inferring implicit intents, or optimizing query representations. Current research identifies four principal methodological categories for designing \\(\\Psi\\):" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.726, + 0.483, + 0.862 + ], + "angle": 0, + "content": "Query Optimization focuses on generating and selecting query variants to maximize retrieval relevance. Mathematically, this is formalized as Candidates \\( = \\) Generate(Q,C), \\( \\Psi_{\\mathrm{Optimize}}(Q,C) = \\arg \\max_{\\mathrm{candidate} \\in \\mathrm{Candidates}} \\) Score(candidate), where (Generate) produces candidate queries and (arg max) selects optimal variants based on contrastive training or reinforcement learning. Representative implementations, such as LeReT [34], leverage iterative sampling and optimization to balance query diversity and specificity." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.862, + 0.483, + 0.909 + ], + "angle": 0, + "content": "Attribute Judgment employs classification mechanisms to dynamically regulate retrieval triggers. 
This is modeled as \\(\\Psi_{\\mathrm{Classify}}(Q) = \\mathrm{Classify}(Q)\\), where Classify evaluates query" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.383, + 0.919, + 0.443 + ], + "angle": 0, + "content": "attributes (e.g., temporal sensitivity, intent complexity) against predefined criteria. Frameworks like UAR [14] and AdaptiveRAG [41] exemplify this approach by integrating multistage classifiers to minimize unnecessary retrievals." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.444, + 0.916, + 0.534 + ], + "angle": 0, + "content": "Plan Generation decomposes complex queries into structured sub-task sequences to guide retrieval direction. Formulated as \\(\\Psi_{\\mathrm{Plan}}(Q) = \\mathrm{Plan}(Q)\\), the operator Plan generates hierarchical task decompositions, as seen in PlanRAG [48], which utilizes chain-of-thought reasoning to align retrieval targets with multi-step problem-solving requirements." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.535, + 0.915, + 0.624 + ], + "angle": 0, + "content": "Semantic Enhancement enriches query representations using domain-specific or task-aware embeddings. Expressed as \\(\\Psi_{\\text{Enhance}}(Q) = \\text{Encode}(Q, \\mathcal{K})\\), where \\(\\mathcal{K}\\) denotes auxiliary knowledge (e.g., reasoning trajectories), methods like O1-Embedder [101] integrate latent reasoning patterns into query embeddings to improve retrieval robustness." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.625, + 0.915, + 0.699 + ], + "angle": 0, + "content": "Collectively, these methodologies demonstrate that pre-retrieval reasoning serves as a systematic interface to mitigate semantic gaps between raw queries and knowledge bases, establishing a critical component for precision-driven RAG architectures." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.713, + 0.916, + 0.865 + ], + "angle": 0, + "content": "4.1.2 Post-Retrieval Reasoning. 
In pre-defined RAG systems with multi-step reasoning pipelines, the post-retrieval reasoning paradigm represents a critical advancement where cognitive processing occurs after information retrieval from external sources. This approach addresses inherent limitations in conventional RAG, particularly in managing knowledge conflicts, mitigating information insufficiency, and enhancing logical consistency across complex reasoning tasks. Mathematically, this process can be formalized as a deterministic function composition:" + }, + { + "type": "equation", + "bbox": [ + 0.652, + 0.892, + 0.913, + 0.907 + ], + "angle": 0, + "content": "\\[\nD = \\Gamma \\circ \\Psi \\circ \\mathcal {R} (Q) \\tag {3}\n\\]" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.381, + 0.072 + ], + "angle": 0, + "content": "Synergizing RAG and Reasoning: A Systematic Review" + }, + { + "type": "header", + "bbox": [ + 0.653, + 0.06, + 0.912, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.092, + 0.482, + 0.121 + ], + "angle": 0, + "content": "\\(\\mathcal{R}\\) denotes the retrieval operator, \\(\\Psi\\) implements the reasoning transformation, and \\(\\Gamma\\) represents the final decision function." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.122, + 0.483, + 0.379 + ], + "angle": 0, + "content": "The core characteristic of Post-Retrieval Reasoning lies in its execution of the reasoning process after retrieval, with the reasoning target being the retrieved content. ToG2.0 [60] proposes an iterative multi-step reasoning framework that alternates between graph retrieval and context retrieval, integrating the reasoning judgment of LLMs to progressively expand entities and prune irrelevant information, ultimately generating accurate answers. 
This approach dynamically addresses the issue of insufficient information through iterative refinement while establishing a dual-evidence verification mechanism via knowledge graph relation pruning and entity-guided context retrieval. Its graph-structured reasoning module transforms the connectivity validation of triple paths into a constraint satisfaction problem, effectively mitigating logical inconsistencies between text fragments and thereby significantly improving the quality of complex question answering." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.379, + 0.483, + 0.651 + ], + "angle": 0, + "content": "ActiveRAG [100], on the other hand, employs a predefined three-stage process (Self-Inquiry \\(\\rightarrow\\) Knowledge Assimilation \\(\\rightarrow\\) Thought Accommodation) to structurally comprehend and calibrate retrieved knowledge, resolving conflicts between parametric memory and external knowledge. During the Knowledge Assimilation stage, ActiveRAG enhances the corrective effect of external knowledge on the internal representations of LLMs through multi-instruction fine-tuning strategies (e.g., counterfactual comparison and anchor association), substantially reducing the likelihood of hallucination generation. ARM's [7] structural alignment and self-verification stages also demonstrate optimization for post-retrieval reasoning. By incorporating domain knowledge via mixed-integer programming (MIP) solvers, ARM ensures the rationality and coverage of retrieval results, providing a scalable optimization framework for multi-source data compatibility and thereby enabling globally optimal cross-modal retrieval." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.658, + 0.496, + 0.78 + ], + "angle": 0, + "content": "4.1.3 Hybrid Reasoning. The Hybrid pattern of pre-defined process forms a composite processing paradigm by integrating pre-retrieval reasoning with post-retrieval reasoning. 
The essence is formalized as a multi-round recursive iterative process, where each iteration cycle strictly comprises three phases: Retrieval, Generation, and Reasoning, executed as structured composite operations. Let the total number of iterations be \\( T \\); the workflow is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.18, + 0.79, + 0.483, + 0.815 + ], + "angle": 0, + "content": "\\[\nQ _ {T} = \\left(\\bigcirc_ {t = 1} ^ {T} \\mathcal {R} _ {t} \\circ \\Gamma_ {t} \\circ \\Psi_ {t}\\right) \\left(Q _ {0}\\right) \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.817, + 0.485, + 0.907 + ], + "angle": 0, + "content": "Here, each iterative unit is indexed by \\( t \\). The process terminates when a predefined condition \\( \\mathcal{T}(Q_t, D_t, C_t) \\) is met, yielding the final response \\( \\Gamma_{\\mathrm{final}}(C_T) \\). This recursive mechanism enables dynamic synergy between knowledge acquisition and semantic inference, overcoming the linear limitations of single-cycle retrieval-generation frameworks." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.915, + 0.288 + ], + "angle": 0, + "content": "IR-CoT [78] leverages chain-of-thought reasoning to iteratively construct intermediate logic chains, enabling multi-hop retrieval guided by progressively refined contextual cues. FinSearch [50] introduces a dual-phase architecture that first generates structured search graphs to model temporal and entity dependencies, followed by dynamic query rewriting to optimize financial data retrieval. LevelRAG employs hierarchical validation mechanisms, aggregating multi-granular retrieval results and triggering supplementary retrievals based on context completeness assessments. ITER-RETGEN [68] utilizes generation-enhanced feedback loops to iteratively refine query representations, enhancing semantic alignment between retrieval and generation phases." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.289, + 0.932, + 0.455 + ], + "angle": 0, + "content": "These approaches share a common foundation in structured recursion while diverging in operational mechanisms. By enforcing deterministic iteration cycles, they balance controlled workflow execution with adaptive semantic exploration, addressing challenges such as multi-step reasoning, temporal coherence, and cross-domain knowledge synthesis. The hybrid paradigm's strength lies in its capacity to decompose complex queries into iterative retrieval-generation units, systematically bridging knowledge gaps while maintaining interpretability and robustness in open-domain problem-solving scenarios." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.481, + 0.742, + 0.495 + ], + "angle": 0, + "content": "4.2 Dynamic RAG Workflow" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.5, + 0.915, + 0.741 + ], + "angle": 0, + "content": "The RAG with dynamic workflow represents an autonomous reasoning architecture centered around LLMs, characterized by the integration of non-deterministic operational workflows and real-time decision-making capabilities. Unlike predefined pipelines, this architecture enables continuous monitoring of reasoning states to dynamically trigger retrieval, generation, or verification operations. The LLM actively evaluates contextual demands during reasoning processes, autonomously determining optimal moments for invoking external tools or resources through a hybrid feedback coordination mechanism. By eliminating fixed iterative units and pre-determined tool-calling sequences, the framework achieves dynamic evolution of execution pathways, demonstrating superior adaptability in complex cognitive tasks through real-time adjustment of computational workflows based on intermediate reasoning outcomes." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.741, + 0.915, + 0.907 + ], + "angle": 0, + "content": "This dynamic architecture manifests three principal characteristics: 1) Operator invocation is governed by the LLM's contextual state analysis, exemplified through special token prediction (e.g., '[Web-Search]' or `') to initiate external operations; 2) Reasoning trajectories exhibit high flexibility, allowing dynamic query reformulation and sub-problem generation to overcome limitations of static workflows; 3) Context-driven decision mechanisms prioritize real-time reasoning states over predefined rules, enhancing systemic responsiveness to emergent task complexities while improving precision." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.347, + 0.073 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.858, + 0.06, + 0.912, + 0.072 + ], + "angle": 0, + "content": "Gao et al." + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.092, + 0.483, + 0.153 + ], + "angle": 0, + "content": "Defining the reasoning state at time \\( t \\) as \\( S_{t} = (H_{t}, C_{t}) \\), where \\( H_{t} \\) denotes historical information aggregation and \\( C_{t} \\) represents contextual embedding vectors, the decision process is modeled as a stochastic system:" + }, + { + "type": "equation", + "bbox": [ + 0.23, + 0.171, + 0.483, + 0.188 + ], + "angle": 0, + "content": "\\[\na _ {t + 1} \\sim \\pi \\left(S _ {t}; \\Theta\\right) \\tag {5}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.209, + 0.212, + 0.483, + 0.228 + ], + "angle": 0, + "content": "\\[\nS _ {t + 1} = \\delta \\left(S _ {t}, \\mathcal {T} _ {a _ {t + 1}} \\left(S _ {t}\\right)\\right) \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.233, + 0.483, + 0.399 + ], + "angle": 0, + "content": "Here, \\(\\pi : S \\to \\Delta(\\mathcal{A})\\) constitutes the policy function mapping states to probability 
distributions over action space \\(\\mathcal{A}\\) (retrieval, generation, verification, etc.), while \\(\\mathcal{T}_a\\) denotes state transition functions corresponding to action \\(a\\). The non-Markovian nature of the system emerges from \\(S_{t+1}\\)'s dependence on complete historical trajectories \\(\\{S_{\\leq t}\\}\\), with dynamic adaptability ensured through extensible action spaces \\(\\mathcal{A}\\) and online optimization of policy parameters \\(\\Theta\\). This formulation enables context-sensitive state updates via \\(\\delta : S \\times \\mathcal{O} \\to S\\), establishing a theoretical foundation for open-ended reasoning processes in complex problem domains." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.4, + 0.483, + 0.792 + ], + "angle": 0, + "content": "Based on the mode of reasoning initiation, agentic RAG with dynamic workflows can be further categorized into three distinct types: Proactivity-driven, Reflection-driven, and Feedback-driven mechanisms. The LLM proactivity-driven approach is characterized by the model's autonomous triggering of actions based on internal assessments, executing operations without external intervention through mechanisms analogous to human intuitive decision-making—for instance, when the model independently identifies insufficient evidentiary support in the current reasoning process, it proactively generates retrieval requests to supplement information. The reflection-driven mode emphasizes self-examination of the reasoning process, dynamically initiating subsequent operations through quantitative evaluation of intermediate result quality (e.g., triggering actions when the calculated reasoning support score of 0.7 exceeds a predefined threshold of 0.6), which simulates the self-optimization logic of expert systems, enabling the model to adjust reasoning pathways through introspection. 
The feedback-driven mechanism incorporates external intervention, employing independent models or rule-based systems to perform real-time scoring of intermediate states (e.g., an external reward model assigning a 2.5/5 score to reasoning steps) while providing corrective suggestions, operating similarly to a mentor-guided mode that continuously calibrates the reasoning workflow through external feedback signals." + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.801, + 0.483, + 0.907 + ], + "angle": 0, + "content": "4.2.1 Proactivity-Driven Reasoning. The core innovation of Proactivity-driven Reasoning lies in enabling LLMs to fully govern the reasoning process through self-triggered prediction mechanisms. This active control manifests through three key mechanisms: (1) direct tool invocation via model-generated special tokens (e.g., [Web-Search]), without external intervention, (2) context-aware decision making based" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.915, + 0.137 + ], + "angle": 0, + "content": "on real-time knowledge gaps or hypothesis verification requirements, and (3) Markov Decision Process (MDP)-based dynamic path optimization." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.137, + 0.915, + 0.288 + ], + "angle": 0, + "content": "Formally, the reasoning process can be modeled as a state sequence \\( S = \\{s_0, s_1, \\ldots, s_t\\} \\), where each state \\( s_t \\) encapsulates the current reasoning context. At each step \\( t \\), the LLM selects an action \\( a_t \\in \\{\\text{retrieve, generate, terminate}\\} \\) based on \\( s_t \\), executes the corresponding operation (e.g., document retrieval or answer generation), and updates its state through transition function \\( s_{t+1} = \\delta(s_t, a_t, o_t) \\) where \\( o_t \\) represents action outcomes. This MDP framework enables dynamic path adjustment through real-time feedback until termination (\\( a_T = \\text{terminate} \\)) and final answer generation." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.288, + 0.93, + 0.47 + ], + "angle": 0, + "content": "Recent advancements demonstrate significant improvements over conventional RAG approaches. The Agentic Reasoning framework achieves granular control through dynamic tool invocation, eliminating predefined execution sequences. DeepRAG [24] optimizes cost-accuracy tradeoffs via MDP-based imitation learning, addressing the retrieval-generation disconnection in traditional systems. CoRAG [83] introduces hybrid-driven mechanisms combining LLM-initiated subqueries with external policy control, enhancing error tolerance for complex queries. Collectively, these approaches establish a paradigm shift from fixed pipelines to context-sensitive, self-optimizing reasoning architectures." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.477, + 0.922, + 0.658 + ], + "angle": 0, + "content": "4.2.2 Reflection-Driven Reasoning. The reflection-driven mechanism represents a dynamic reasoning framework that enables iterative self-evaluation and revision of intermediate outputs through model introspection. Common methods include: (1) an evaluation system combining explicit token prediction and implicit confidence scoring, (2) self-monitoring capabilities through grounding tokens for content-document consistency verification and utility tokens for answer effectiveness assessment, and (3) adaptive routing mechanisms that automatically select single-hop or multi-hop reasoning paths based on contextual complexity. 
The mathematical formalism of this process can be expressed as:" + }, + { + "type": "equation", + "bbox": [ + 0.563, + 0.669, + 0.914, + 0.71 + ], + "angle": 0, + "content": "\\[\n\\mathcal {P} = \\bigcup_ {t = 1} ^ {T} \\left[ G \\left(\\mathbf {C} _ {t}\\right)\\rightarrow E \\left(\\mathbf {H} _ {t}, \\mathcal {D}\\right)\\rightarrow \\psi \\left(\\phi \\left(\\mathbf {e} _ {t}\\right), \\tau\\right)\\right] \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.711, + 0.914, + 0.786 + ], + "angle": 0, + "content": "where \\(G\\) denotes the generation function operating on current context \\(\\mathbf{c}_t\\), \\(E\\) represents the evaluation function that assesses hidden states \\(\\mathbf{h}_t\\) against external knowledge base \\(\\mathcal{D}\\), \\(\\phi\\) serves as the confidence mapping function, \\(\\tau\\) is the decision threshold, and \\(\\psi\\) functions as the branch selector." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.786, + 0.915, + 0.907 + ], + "angle": 0, + "content": "In practical implementations like Self-RAG [3], this framework generates candidate responses alongside reflection tokens, computes passage relevance scores (ISREL \\(\\in\\) [0,1]) and factual support metrics (ISSUP), and employs weighted aggregation of token probabilities in \\(\\phi\\) to determine retrieval activation or generation revision through threshold-based \\(\\delta\\) operations. 
Meanwhile, Open-RAG [38] incorporates hybrid threshold mechanisms and Mixture-of-Experts architecture" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.381, + 0.073 + ], + "angle": 0, + "content": "Synergizing RAG and Reasoning: A Systematic Review" + }, + { + "type": "header", + "bbox": [ + 0.653, + 0.06, + 0.913, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.092, + 0.483, + 0.227 + ], + "angle": 0, + "content": "to enforce counterfactual verification through non-retrieval confidence scoring \\((\\mathrm{Pr}_{\\mathrm{NoRT}})\\), enabling dynamic expansion of complex reasoning capabilities while preserving base model efficiency. ReaRAG [49] utilizes knowledge-guided reasoning chains combined with external knowledge sources to perform reflection-driven reasoning. In each iteration, it adjusts the reasoning path through the \"Thought-Action-Observation\" paradigm, effectively preventing error propagation and improving answer accuracy." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.228, + 0.483, + 0.364 + ], + "angle": 0, + "content": "The paradigm's innovation lies in reconstructing traditional sequential processes into conditional Markov decision processes, where state transition probabilities \\( P(s_{t + 1}|s_t) \\) are dynamically determined by model self-evaluation outcomes. Compared to proactive LLM-driven methods (e.g., Toolformer's direct API invocation), the reflection-driven approach establishes closed-loop control through explicit evaluation stages (function \\( E \\)), effectively mitigating hallucination risks while maintaining computational efficiency." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.371, + 0.483, + 0.538 + ], + "angle": 0, + "content": "4.2.3 Feedback-Driven Reasoning. 
The feedback-driven dynamic RAG system establishes closed-loop control over reasoning processes through external signals, formally modeled as a Partially Observable Markov Decision Process. The system state \\( s_t = (q_t, \\mathcal{K}_t, \\mathcal{H}_t) \\) evolves through iterative interactions, comprising the current query representation \\( q_t \\), dynamic knowledge base \\( \\mathcal{K}_t \\), and historical trajectory \\( \\mathcal{H}_t \\). Initialized with \\( q_0 \\) and \\( \\mathcal{K}_0 = \\emptyset \\), the policy function \\( \\pi(a_t | s_t) \\) generates actions from the operational space \\( \\mathcal{A} = \\{\\text{Retrieve}, \\text{Reason}, \\text{Verify}, \\text{Answer}, \\emptyset\\} \\). State transitions follow \\( s_{t+1} = \\delta(s_t, a_t) \\) with knowledge base updates" + }, + { + "type": "equation", + "bbox": [ + 0.136, + 0.542, + 0.482, + 0.559 + ], + "angle": 0, + "content": "\\[\n\\mathcal {K} _ {t + 1} = \\mathcal {K} _ {t} \\oplus \\operatorname {Retrieve} \\left(q _ {t}\\right) \\cdot \\mathbb {I} \\left(a _ {t} = \\text {Retrieve}\\right) \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.563, + 0.483, + 0.609 + ], + "angle": 0, + "content": "where \\(\\oplus\\) denotes incremental updates and \\(\\mathbb{I}\\) represents an indicator function. The reward function \\(R(s_{t},a_{t},s_{t + 1})\\to r_{t}\\) drives policy optimization through" + }, + { + "type": "equation", + "bbox": [ + 0.16, + 0.614, + 0.482, + 0.63 + ], + "angle": 0, + "content": "\\[\n\\pi_ {t + 1} = \\Omega \\left(\\pi_ {t}, \\nabla_ {\\theta} \\mathbb {E} _ {a \\sim \\pi_ {t}} \\left[ R \\left(s _ {t}, a, s _ {t + 1}\\right) \\right]\\right) \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.634, + 0.483, + 0.664 + ], + "angle": 0, + "content": "forming an adaptive control loop. Three distinct feedback mechanisms emerge within this framework." 
+ }, + { + "type": "text", + "bbox": [ + 0.084, + 0.665, + 0.488, + 0.723 + ], + "angle": 0, + "content": "Explicit reward feedback employs specialized models \\(\\pi_{\\mathrm{reward}}\\) for quantitative evaluation, exemplified by RAG-Gym's process rewards [96]. The reward function combines immediate and terminal rewards:" + }, + { + "type": "equation", + "bbox": [ + 0.152, + 0.728, + 0.482, + 0.747 + ], + "angle": 0, + "content": "\\[\nr _ {t} = \\lambda_ {1} \\pi_ {\\text {r e w a r d}} \\left(s _ {t}\\right) + \\lambda_ {2} \\mathbb {E} _ {s _ {t + k}} \\left[ \\gamma^ {k} R _ {\\text {t e r m i n a l}} \\right] \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.751, + 0.483, + 0.781 + ], + "angle": 0, + "content": "with discount factor \\(\\gamma\\). SmartRAG extends this through policy gradient optimization" + }, + { + "type": "equation", + "bbox": [ + 0.149, + 0.785, + 0.482, + 0.825 + ], + "angle": 0, + "content": "\\[\n\\nabla_ {\\theta} J (\\theta) = \\mathbb {E} _ {\\tau \\sim \\pi_ {\\theta}} [ \\sum_ {t = 0} ^ {T} \\nabla_ {\\theta} \\log \\pi_ {\\theta} (a _ {t} | s _ {t}) \\hat {A} _ {t} ] \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.831, + 0.483, + 0.859 + ], + "angle": 0, + "content": "where the advantage function \\(\\hat{A}_t\\) integrates temporal feedback." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.862, + 0.484, + 0.907 + ], + "angle": 0, + "content": "Implicit environmental feedback derives from knowledge base validation, as implemented in KBQA-o1's SPARQL verification and SolutionRAG's pruning mechanisms [58]." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.092, + 0.915, + 0.168 + ], + "angle": 0, + "content": "This feedback is formalized as \\(r_t = \\mathbb{I}(\\mathcal{K}_t\\models q_0)\\cdot c_{\\mathrm{valid}} - \\mathbb{I}(\\bot \\in \\mathcal{K}_t)\\cdot c_{\\mathrm{invalid}}\\) with validation function \\(\\mathbb{I}(\\cdot)\\) and penalty coefficients \\(c\\). 
ReARTeR [75] introduces threshold-triggered correction: when \\(r_t < \\tau\\), it activates refinement loops \\(\\mathcal{K}_{t + 1} = \\mathrm{PEM}(\\mathcal{K}_t,q_0)\\oplus \\mathrm{Retrieve}(\\mathrm{PRM}(s_t))\\)." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.168, + 0.913, + 0.213 + ], + "angle": 0, + "content": "Structured rule feedback encodes domain knowledge through differentiable scoring functions. MCTS-KBQA [97] implements depth-attenuated rewards" + }, + { + "type": "equation", + "bbox": [ + 0.608, + 0.226, + 0.913, + 0.264 + ], + "angle": 0, + "content": "\\[\nr _ {t} = \\frac {1}{1 + \\alpha d _ {t}} \\sum_ {i = 1} ^ {n} \\mathrm {L L M} _ {\\text {s c o r e}} \\left(a _ {t} ^ {(i)}\\right) \\tag {12}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.277, + 0.915, + 0.323 + ], + "angle": 0, + "content": "with search depth \\( d_t \\) and decay coefficient \\( \\alpha \\). CR-Planner's hierarchical critique combines subgoal and execution scores: \\( r_t^{\\mathrm{total}} = \\beta_1\\pi_{\\mathrm{sub}}(s_t) + \\beta_2\\pi_{\\mathrm{exec}}(a_t|s_t) \\) through weighted fusion." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.323, + 0.915, + 0.458 + ], + "angle": 0, + "content": "These feedback mechanisms interact through a unified strategy update framework, where external feedback-driven approaches achieve controllable optimization of the reasoning process through interpretable feedback signals while maintaining the generative capabilities of LLMs. Overall, the dynamic process of RAG, by endowing the model with autonomy in the reasoning process, not only enhances adaptability to complex tasks but also provides a new solution for efficient reasoning in resource-constrained environments." 
+ }, + { + "type": "title", + "bbox": [ + 0.515, + 0.475, + 0.855, + 0.492 + ], + "angle": 0, + "content": "5 Implementation and Optimization" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.495, + 0.915, + 0.616 + ], + "angle": 0, + "content": "Building upon preceding sections, this section systematically analyzes the concrete implementation and optimization strategies for reasoning within the RAG paradigm. In contrast to existing surveys that predominantly focus on posttraining methodologies or isolated LLM reasoning mechanisms, our analysis maintains a dedicated focus on the synergistic integration of RAG with reasoning examining their co-adaptive implementations through a structural lens." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.632, + 0.694, + 0.646 + ], + "angle": 0, + "content": "5.1 Reasoning Process" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.65, + 0.915, + 0.907 + ], + "angle": 0, + "content": "5.1.1 LLM CoT. Integrating Chain-of-Thought (CoT) reasoning with LLMs is key to combining RAG with complex reasoning tasks. Research shows CoT enhances RAG systems by explicitly guiding multi-step reasoning and dynamically incorporating external knowledge. For example, ActiveRAG [100] uses a \"Self-Inquiry \\(\\rightarrow\\) Knowledge Assimilation \\(\\rightarrow\\) Thought Accommodation\" chain to align knowledge and reasoning: a knowledge assimilation agent merges external documents with LLM memory via operations like association and reflection, creating structured knowledge. Meanwhile, a reasoning adaptation agent refines inference chains from Self-Inquiry to ensure answers align with retrieved knowledge and address reasoning gaps. Similarly, Adaptive-RAG [41] alternates between CoT and retrieval, breaking down multi-hop reasoning into steps such as entity localization and document correlation, refining retrieval and generation based on prior results." 
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.347, + 0.073 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.858, + 0.06, + 0.912, + 0.072 + ], + "angle": 0, + "content": "Gao et al." + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.088, + 0.913, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.085, + 0.27, + 0.913, + 0.416 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.201, + 0.431, + 0.799, + 0.448 + ], + "angle": 0, + "content": "Figure 6. Implementation and optimization of the synergy between RAG and Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.466, + 0.485, + 0.664 + ], + "angle": 0, + "content": "At the knowledge and reasoning level, O1-Embedder [101] drives RAG through open-ended long-text reasoning, extending CoT beyond fixed triggers via coherent thought processes like problem decomposition. PlanRAG [48] explicitly uses CoT to produce executable multi-step plans, adjusting operations dynamically through a closed-loop \"plan-execute-feedback\" cycle. Despite different implementations, these methods share two CoT strengths: breaking down complex problems into clear intermediate steps and guiding external knowledge selection through reasoning states. Studies show these approaches outperform traditional RAG in multi-hop QA and knowledge-intensive tasks by enhancing both LLMs' reasoning and adaptability to external knowledge." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.696, + 0.515, + 0.907 + ], + "angle": 0, + "content": "5.1.2 Special Token Prediction. Recent advances active RAG also highlight special token prediction as a key method for dynamically linking external knowledge retrieval with multi-step reasoning [16]. 
By embedding domain- or action-specific tokens (e.g., '[Web-search]', '[Retrieve=Yes)', `') into LLM vocabularies, models can autonomously trigger tools or self-reflect during text generation. Frameworks like Self-RAG [3] and SmartRAG [20] use dedicated tokens ('Retrieve', 'ISREL', '[RETRIEVE]') to manage retrieval activation, relevance checks, and output verification, turning static reasoning chains into conditional workflows. The innovation lies in predicting these tokens within generated sequences, segmenting tasks into retrieval initiation, document evaluation, and knowledge grounding phases." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.466, + 0.915, + 0.648 + ], + "angle": 0, + "content": "Hybrid models such as Open-RAG [38] combine token control with mixture-of-experts (MoE) routing, sparsely activating experts aligned with token-predicted reasoning. Unlike traditional chain-of-thought or search tree methods, special token prediction offers finer control and interpretability by encoding decision logic explicitly in token sequences while maintaining end-to-end training. This approach also overcomes latency and inflexibility of preset retrieval schedules by enabling context-aware, on-demand tool use. For example, R1-Searcher [72] and Search-o1 [51] use token boundaries like `' to coordinate retrieval pauses and resume generation after knowledge integration." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.648, + 0.915, + 0.724 + ], + "angle": 0, + "content": "Together, these systems show that token-level prediction not only bridges reasoning and retrieval but also creates a scalable framework for tool-enhanced language agents, preserving generative fluency while enabling systematic external knowledge integration and procedural reasoning." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.756, + 0.915, + 0.862 + ], + "angle": 0, + "content": "5.1.3 Search-Driven Reasoning. 
Recent advancements in search-driven reasoning have significantly improved RAG frameworks by employing structured search strategies for dynamic information exploration and multi-step reasoning with external knowledge. Current approaches mainly follow three paradigms: tree-based search, MCTS, and reinforcement learning-optimized policy networks." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.862, + 0.915, + 0.907 + ], + "angle": 0, + "content": "Tree-based methods organize reasoning hierarchically through structured path exploration. For example, StePO-Rec [5] uses a multi-step tree-structured reasoning method" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.086, + 0.061, + 0.381, + 0.072 + ], + "angle": 0, + "content": "Synergizing RAG and Reasoning: A Systematic Review" + }, + { + "type": "header", + "bbox": [ + 0.654, + 0.061, + 0.912, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.092, + 0.482, + 0.287 + ], + "angle": 0, + "content": "that iteratively retrieves different outfit matching knowledge and user preferences at each node, ultimately achieving generative recommendations for complementary items. OmniThink [94] uses an information tree to expand topic analysis by generating subqueries that guide breadth-first or depth-first retrievals. DeepRAG [24] applies a binary tree search within a Markov decision process to explore parametric knowledge and retrieval paths in parallel, selecting optimal branches. DeepSolution's [54] bidirectional thinking tree alternates expanding solution and critique nodes with scoring for path pruning, aligning naturally with MCTS evaluation. These methods balance exploration efficiency with solution coverage through explicit tree structures." 
+ }, + { + "type": "text", + "bbox": [ + 0.087, + 0.288, + 0.482, + 0.499 + ], + "angle": 0, + "content": "MCTS enhances robustness by optimizing long-term decisions via simulation, evaluation, and backpropagation. CR-Planner [52] integrates MCTS with the UCB strategy to balance exploration and exploitation while estimating optimal subgoals through multi-step simulations. KBQA-O1 [58] and MCTS-KBQA [97] generate candidate actions using policy models and combine reward models to globally assess logical forms, reducing local optima. ReARTeR [75] innovatively merges MCTS with procedural reward models (PRMs), interleaving retrieval and reasoning steps, and filtering high-reward paths to form a closed-loop \"reason-retrieve-reason\" cycle. These methods probabilistically explore paths and use reinforcement learning feedback to improve global reasoning for complex tasks." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.5, + 0.482, + 0.605 + ], + "angle": 0, + "content": "Reinforcement learning-optimized policy networks adaptively refine search strategies. LeReT [34] replaces fixed search algorithms with reinforcement learning (e.g., IPO) to dynamically optimize query generation based on rewards like retrieval accuracy, implicitly learning optimal search patterns without explicit tree or graph structures, thus offering greater flexibility and scalability." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.606, + 0.482, + 0.725 + ], + "angle": 0, + "content": "In summary, search-driven reasoning unites inference and retrieval through structured strategies, combining multi-path exploration, dynamic evaluation, and adaptive optimization to deliver interpretable, efficient solutions for knowledge-intensive tasks. Future work may focus on hybrid paradigms (e.g., integrating MCTS and reinforcement learning) and lightweight algorithms to balance performance with computational efficiency." 
+ }, + { + "type": "text", + "bbox": [ + 0.087, + 0.757, + 0.488, + 0.907 + ], + "angle": 0, + "content": "5.1.4 Reasoning on Graph. Graph-structured reasoning offers a novel approach for multi-hop inference in RAG systems by explicitly modeling knowledge interaction paths through topology. Current methods fall into two categories: query-flow-oriented search graphs (e.g. FinSearch [50]) and knowledge-association-based expansion graphs (ToG-2.0 [60]) FinSearch builds a directed acyclic graph (DAG) where nodes are atomic subqueries (e.g., stock prices, financial reports) and edges capture logical and temporal dependencies. A pre-planner breaks down queries into subquery sequences," + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.092, + 0.913, + 0.151 + ], + "angle": 0, + "content": "using graph traversal to control information flow and dynamically adjust paths, such as backtracking when conflicts arise—substantially surpassing linear chain-of-thought methods in handling complex logic." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.168, + 0.913, + 0.529 + ], + "angle": 0, + "content": "5.1.5 External Solver. The integration of RAG and reasoning is also can be achieved by incorporating external solvers, where specialized solvers, such as the Alignment-Oriented LLM-based Retrieval Method (ARM), are employed to handle the reasoning component. The retrieval process for complex problems is formulated as a global optimization task, leveraging external solvers like mixed-integer programming (MIP) to achieve structural alignment and joint optimization of data objects. Specifically, ARM first decomposes user queries into keywords that match N-grams in the dataset through an information alignment module, generating an initial set of retrieval candidates via constrained decoding. 
Subsequently, in the structural alignment phase, the MIP solver performs global filtering on candidate objects based on a predefined objective function that maximizes both the relevance of retrieved objects to the query and their mutual compatibility. This ensures that the selected objects not only cover the requirements of the query but also form a coherent information chain through entity or inter-table linkages. Finally, the self-verification mechanism of the LLM, combined with a beam search-based aggregation strategy, dynamically refines and consolidates multiple candidate sets, ultimately producing a retrieval collection that satisfies both semantic matching and the structural organization of the data." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.53, + 0.913, + 0.845 + ], + "angle": 0, + "content": "ToG-2.0 achieves multi-hop expansion by integrating knowledge graphs with documents, starting from an initial entity and iteratively extending relevant entities and relations (such as corporate ownership chains and technology dependency networks) via the Edge function. This process constructs structured triple paths while simultaneously retrieving and verifying document content. By tuning the width and depth parameters, the method emulates human reasoning: broadly exploring potential associations before deeply verifying high-confidence paths. FRAG [23] dynamically adjusts retrieval strategies by predicting the hop range of reasoning paths based solely on the query text, thereby enhancing retrieval quality without requiring additional fine-tuning or invocation of large language models, enabling flexible and efficient retrieval optimization. FG-RAG [32] further expands entity coverage in graph retrieval through context-aware entity expansion, providing richer background information. 
Combined with query-level fine-grained summary generation, FG-RAG transforms coarse-grained graph information into highly relevant detailed content, effectively improving the performance of query-focused summarization tasks." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.847, + 0.927, + 0.907 + ], + "angle": 0, + "content": "Although differing in design from workflow-based methods, ToG-2.0 shares key advantages with other graph-structured approaches: explicitly modeling reasoning state dependencies, supporting dynamic path generation and optimization," + } + ], + [ + { + "type": "header", + "bbox": [ + 0.086, + 0.06, + 0.347, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.859, + 0.06, + 0.912, + 0.071 + ], + "angle": 0, + "content": "Gao et al." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.092, + 0.482, + 0.168 + ], + "angle": 0, + "content": "and enabling closed-loop interaction between retrieval and reasoning. This effectively overcomes the limitations of traditional RAG in implicit relation inference and counterfactual analysis, thereby establishing an interpretable theoretical and practical framework for knowledge reasoning." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.183, + 0.307, + 0.198 + ], + "angle": 0, + "content": "5.2 Reasoning Optimization" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.2, + 0.482, + 0.276 + ], + "angle": 0, + "content": "In the previous chapter, we focused on introducing several approaches to integrate reasoning with RAG. This chapter shifts attention to how to augment the reasoning capabilities, specifically including Prompt-Based, Tuning-Based, and RL-Based strategies." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.288, + 0.483, + 0.438 + ], + "angle": 0, + "content": "5.2.1 Prompt-Based. 
Prompt-Based optimization is a key approach to improving RAG and reasoning system performance by using carefully designed natural language prompts. These prompts break down complex reasoning tasks into manageable steps and guide LLMs to follow specific logical structures during generation. The main advantage is that control over reasoning flow is achieved solely through prompt design, without parameter fine-tuning or reinforcement learning, preserving the model's generalization while enhancing task-specific results." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.439, + 0.483, + 0.56 + ], + "angle": 0, + "content": "This approach has three main features. First, task structuring: prompts explicitly decompose and control reasoning chains via zero-shot or templated designs. Techniques like Co-STORM [43] and WriteHere [98] use role assignments, stage divisions, and operation-specific instructions to guide multi-step reasoning—such as proposal generation, knowledge retrieval, refinement, and validation—improving interpretability by representing intermediate steps clearly." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.56, + 0.483, + 0.68 + ], + "angle": 0, + "content": "Second, result reliability is improved by standardizing outputs and reducing hallucinations. Strategies include requiring citation of retrieval results, enforcing specific output formats, and integrating reflection and calibration based on retrieved knowledge. Systems like FinSearch [50] and ActiveRAG [100] incorporate temporal weighting, deduplication, and domain rules through prompts, enhancing consistency and logical coherence, especially in complex domains." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.681, + 0.483, + 0.816 + ], + "angle": 0, + "content": "Third, interactive adaptability allows dynamic prompt adjustments. Special tokens (e.g., , [Web-search]) enable models to trigger tools or revise queries in real time based on intermediate results. 
Methods such as Agentic Reasoning [92] and PlanRAG [48] use context-sensitive prompts and feedback loops to refine reasoning paths dynamically, maintaining coherence and accuracy in multi-hop tasks and outperforming traditional RAG methods in complex, evolving scenarios." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.817, + 0.483, + 0.907 + ], + "angle": 0, + "content": "In summary, prompt-based optimization offers an efficient, flexible, and reliable approach to enhancing RAG+Reasoning by emphasizing task structuring, result standardization, and interactive adaptability. Its non-intrusive and broadly applicable design has established it as a mainstream strategy for optimizing LLM reasoning and serves as a foundation" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.915, + 0.243 + ], + "angle": 0, + "content": "for future hybrid methods integrating fine-tuning and reinforcement learning. By systematically optimizing reasoning without altering model parameters through semantic structures, dynamic feedback, and symbolic constraints, this paradigm effectively manages macro-level controls like task decomposition and knowledge integration while addressing key challenges such as generation consistency, logical coherence, and external knowledge alignment. This makes prompt-based optimization a lightweight yet powerful solution for complex reasoning tasks." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.258, + 0.915, + 0.361 + ], + "angle": 0, + "content": "5.2.2 Tuning-Based. The tuning-based approach improves the integration of RAG and reasoning by optimizing model parameters to internalize the retrieval-augmented chain-of-thought mechanism within LLMs. Current research mainly targets three goals: retrieval pathway optimization, structured generation enhancement, and collaborative training with external modules." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.363, + 0.929, + 0.559 + ], + "angle": 0, + "content": "For retrieval pathway optimization, methods like CoRAG [83] and DeepRAG [24] build end-to-end multistep reasoning frameworks through full parameter fine-tuning and multitask learning. CoRAG expands single-step QA datasets into retrieval-reasoning chains and jointly trains tasks such as sub-query generation, intermediate answer prediction, and final composition. This boosts the model's ability to break down complex problems (e.g., multi-entity relational reasoning) and adapt retrieval strategies dynamically (e.g., query rewriting, error correction). DeepRAG combines imitation and contrastive learning with binary tree search to create efficient retrieval paths, using a DPO-style contrastive loss to reduce redundant retrieval while maintaining accuracy." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.56, + 0.915, + 0.816 + ], + "angle": 0, + "content": "To improve structured generation, MCTS-KBQA [97] and Self-RAG [3] fine-tune models for precise special token generation. MCTS-KBQA uses supervised fine-tuning to make large language models output instructions that comply with knowledge graph protocols (e.g., SPARQL), modeling reasoning as executable tool-call sequences. Self-RAG enhances self-supervised generation control by expanding vocabulary and training the model to generate reflection tokens like retrieval triggers and relevance markers, preserving fluency and reducing factual errors. Additionally, O1-Embedder [101] and Open-RAG [38] align semantic spaces via mixed fine-tuning: O1-Embedder combines generative and contrastive training with special tokens to separate generation from embedding tasks, enhancing multihop semantic understanding; Open-RAG uses QLoRA [17] quantized fine-tuning and Mixture of Experts (MoE) modules to specialize networks for single/multi-hop reasoning." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.817, + 0.914, + 0.907 + ], + "angle": 0, + "content": "In collaborative optimization with external modules, AdaptiveRAG [41] and CR-Planner [52] apply parameter isolation to balance generality and adaptability. AdaptiveRAG finetunes a lightweight classifier to select retrieval strategies dynamically. CR-Planner introduces a Critic model trained with contrastive loss on MCTS trajectory data to assess the" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.382, + 0.073 + ], + "angle": 0, + "content": "Synergizing RAG and Reasoning: A Systematic Review" + }, + { + "type": "header", + "bbox": [ + 0.652, + 0.06, + 0.914, + 0.073 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "table_caption", + "bbox": [ + 0.281, + 0.09, + 0.717, + 0.105 + ], + "angle": 0, + "content": "Table 1. Comparison of RL-based RAG with Reasoning Methods" + }, + { + "type": "table", + "bbox": [ + 0.085, + 0.118, + 0.916, + 0.72 + ], + "angle": 0, + "content": "
MethodBase ModelRLParameterSupervisionReward FunctionPolicy Strategy
PORAG [73]Qwen2.5/Llama3.2GRPOQLRAORMDual rewards: \n1. Retrieval fidelity (Rfid) \n2. Response quality (Rqual) \nCombined: R = αRfid + βRqual• Group-based advantage normalization \n• PPO-style clipped objective \n• KL regularization
DeepResearcher [106]Qwen2.5-7BGRPOFullORMFormat compliance penalty (-1) + Answer F1 score• Reference policy constraints \n• KL divergence penalty
ReSearch [6]Qwen2.5-7BGRPOFullORMHybrid rewards: \n• Answer F1 (vs ground truth) \n• Format compliance check• GRPO with clip ratio 0.2 \n• Group advantage normalization (G=5) \n• β = 0.001 KL penalty
ReZero [16]Llama3.2-3BGRPOFullORM+PRM• Answer correctness \n• Format compliance \n• Search diversity \n• Chunk matching \n• Retry behavior \n• Strategy compliance• Intra-group reward comparison \n• Noise-injected robustness training \n• KL constraints
MMOA-RAG [12]Llama-3-8BMAPPOFullORMShared F1 reward + penalties: \n• Excessive sub-questions \n• Document ID errors \n• Answer hesitability• MAPPO actor-critic updates \n• Cosine learning rate scheduling
DeepNote [84]Qwen2.5/Llama3.1DPOFullORMImplicit preference modeling via likelihood contrast• Direct Preference Optimization \n• Preference gap maximization
R1-Searcher [72]Qwen2.5/Llama3.1Reinforce++FullORMTwo-stage rewards: \n1. Retrieval count + format \n2. F1 score + format penalty• RAG-based rollout \n• Retrieval-masked loss
KBQA-O1 [58]Llama3/Qwen2.5/Gemma2MCTSDoRAORM+PRMComposite reward: \n• Stepwise policy model score \n• Final reward model score• MCTS trajectory optimization \n• Q-value backpropagation
DeepRetrieval [42]Qwen2.5-3BPPOFullORMTask metrics: \n• Recall@k/NDCG \n• Syntax validity• GAE advantage estimation \n• Distributed HybridFlow
LeReT [34]Llama3-8B/Gemma-9BIPOFullPRMAverage Precision (AP) of retrieved documents• Identity Policy Optimization \n• Context distillation
SmartRAG [20]Flan-T5-L/Llama2-7BPPOFull/LoRAORMAction-specific: \n• EM+F1 for answers \n• Cost penalty for retrievals• On-policy sampling \n• PPO updates
ReARTeR [75]LLaMA3.1-8BMCTSLoRAORM+PRMMonte Carlo step scoring + TD look-ahead• Iterative preference optimization \n• KTO loss
DeepRAG [24]Qwen2.5-7B/Llama3.1-8BHybridFullORM+PRMCost-aware accuracy: \nR = -C(o) × T(st) \nC(o): Answer correctness \nT(st): Total retrieval cost• Imitation + contrastive learning \n• PPO-like calibration
RAG-Gym [96]LLaMA3.1-8BHybridLoRAPRMTriple criteria: \n• Sufficiency \n• Utility \n• Redundancy• SFT + DPO \n• PRM-guided selection
CR-Planner [52]Skywork-Llama3.1-8BMCTSLoRAPRMCritic-estimated rewards: \n• Stepwise correctness \n• Global impact• MCTS simulation \n• Pairwise ranking loss
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.186, + 0.72, + 0.812, + 0.735 + ], + "angle": 0, + "content": "1ORM: Outcome-based Reward Model; PRM: Process-based Reward Model. 2Full: Full parameter tuning." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.761, + 0.481, + 0.79 + ], + "angle": 0, + "content": "long-term value of reasoning actions, prioritizing efficient solutions in tasks like mathematical reasoning." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.791, + 0.483, + 0.866 + ], + "angle": 0, + "content": "Together, these tuning strategies restructure the parameter space to internalize retrieval-reasoning interactions effectively, enhancing the model's ability to solve complex problems while ensuring computational efficiency and broad applicability across domains." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.877, + 0.486, + 0.907 + ], + "angle": 0, + "content": "5.2.3 RL-Based. As shown in Table 1, Reinforcement learning (RL) has recently become pivotal for tackling long-chain" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.761, + 0.916, + 0.882 + ], + "angle": 0, + "content": "reasoning in modern inference models and optimizing RAG combined with reasoning tasks. Central to these advances is the use of dynamic reward mechanisms that guide LLMs to balance knowledge retrieval and logical reasoning adaptively. RL optimization objectives generally fall into two categories: outcome-based reward modeling (ORM) and process-based reward modeling (PRM), with some hybrid approaches blending both to balance global goals and local optimizations." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.347, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.858, + 0.06, + 0.912, + 0.071 + ], + "angle": 0, + "content": "Gao et al." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.092, + 0.482, + 0.288 + ], + "angle": 0, + "content": "The ORM paradigm focuses solely on the quality of the final output and its adherence to standards. For example, R1-Searcher [72] employs a two-stage Reinforce++ [35] training where rewards in the first stage depend on correct retrieval calls and special token generation, while the second stage directly optimizes the F1 score of answers. This encourages the model to develop strategies maximizing knowledge integration, reducing hallucinations, and enhancing accuracy in multi-hop QA beyond traditional RAG methods. Similarly, KBQA-O1 [58] uses MCTS with a policy network for candidate reasoning paths and a reward model evaluating logical consistency, effectively balancing exploration and exploitation in knowledge base QA." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.288, + 0.483, + 0.439 + ], + "angle": 0, + "content": "Conversely, PRM emphasizes detailed supervision of intermediate reasoning steps. LeReT [34] uses the Identity Policy Optimization (IPO) algorithm, optimizing query quality by rewarding average precision (AP) of retrieved documents, boosting retrieval recall and overall multi-hop task performance. ReARTeR [75] extends this with a step-level binary reward model, combining Monte Carlo scoring and temporal difference (TD) methods to evaluate reasoning paths proactively, reducing logical errors and redundant retrievals, and improving accuracy on benchmarks like HotpotQA." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.439, + 0.487, + 0.65 + ], + "angle": 0, + "content": "Moreover, influenced by DeepSeek-R1, GRPO [69] is also gradually being applied in scenarios combining RAG and Reasoning. GRPO is a variant of the Proximal Policy Optimization (PPO) reinforcement learning algorithm that abandons the critic model and instead estimates the baseline from group scores, significantly reducing training resources. 
For example, ReZero [16] uses GRPO to introduce a \"retry\" mechanism for LLMs, incentivizing LLMs to keep trying after an initial search failure by rewarding retry search queries. This mechanism simulates the human strategy of \"if at first you don't succeed, try again\" in information retrieval. PORAG [73], based on GRPO, directly optimizes retrieval quality, contextual relevance, and generation coherence through a dual reward mechanism (retrieval fidelity and response quality)." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.65, + 0.483, + 0.816 + ], + "angle": 0, + "content": "Hybrid methods merge ORM and PRM to optimize both final outcomes and intermediate steps via composite rewards. SmartRAG [20] applies Proximal Policy Optimization (PPO), combining answer-level F1 rewards with penalties for excessive retrievals, balancing knowledge completeness and efficiency. RAG-Gym [96] advances this with multidimensional process rewards (sufficiency, utility, redundancy) and techniques like contrastive loss and Best-of-N sampling to promote efficient search decisions, even zero-shot. These hybrid strategies markedly lower retrieval costs while sustaining accuracy in complex tasks." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.817, + 0.483, + 0.893 + ], + "angle": 0, + "content": "In addition, we can also observe that in current RL-based methods, academia focuses more on exploration with small-scale LLMs (<8B), among which the Qwen and Llama series are the most widely used. Overall, RL provides a flexible, scalable framework for integrating RAG and reasoning. ORM" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.915, + 0.183 + ], + "angle": 0, + "content": "guides the discovery of globally optimal strategies, PRM enhances reasoning robustness via local refinements, and their combination addresses modular system limits. 
Future work may explore collaborative rewards in multi-agent settings, offline RL based on world models, and hierarchical reward decomposition for open-domain applications." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.196, + 0.857, + 0.212 + ], + "angle": 0, + "content": "6 Downstream Tasks and Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.216, + 0.915, + 0.352 + ], + "angle": 0, + "content": "While previous chapters focused on methodologies and advances in RAG combined with reasoning, this chapter shifts to tasks and evaluation. It provides a comprehensive overview and analysis of existing tasks, datasets, their current status, and emerging trends. By reviewing these resources, we highlight the landscape's gaps and limitations in current evaluation methods. The chapter also explores key challenges in assessment frameworks, identifying shortcomings and suggesting potential improvements." + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.368, + 0.912, + 0.63 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.644, + 0.915, + 0.719 + ], + "angle": 0, + "content": "Figure 7. The current downstream tasks and datasets related to the combination of RAG and Reasoning show that multi-hop question answering tasks still dominate. Correspondingly, HotpotQA, 2WikiMultihopQA, and MuSiQue remain the most commonly used evaluation datasets." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.753, + 0.761, + 0.767 + ], + "angle": 0, + "content": "6.1 Knowledge-Intensive Tasks" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.771, + 0.915, + 0.907 + ], + "angle": 0, + "content": "In the evaluation for RAG systems, knowledge-intensive question answering (QA) remains the primary focus (Figure 7). As LLMs improve in semantic understanding and reasoning, benchmarks have expanded to cover tasks from simple fact retrieval to complex multi-step reasoning. 
However, evaluation methods specifically designed for RAG lag behind due to the dual challenge of assessing both retrieval-generation coherence and adaptability to dynamic knowledge bases. For example, multi-hop QA requires integrating" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.381, + 0.072 + ], + "angle": 0, + "content": "Synergizing RAG and Reasoning: A Systematic Review" + }, + { + "type": "header", + "bbox": [ + 0.653, + 0.06, + 0.912, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.092, + 0.482, + 0.182 + ], + "angle": 0, + "content": "dispersed knowledge through multi-stage retrieval while verifying logical consistency between answers and retrieval paths. This complexity increases dataset construction costs compared to purely generative tasks, keeping research centered on knowledge-intensive QA subcategories such as open-domain QA, knowledge-base QA, and multi-hop QA." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.183, + 0.508, + 0.44 + ], + "angle": 0, + "content": "Commonly used datasets include Natural Questions (NQ) [47] for single-hop factual queries, HotpotQA, 2WikiMultiHopQA [31] and Musique [79] for multi-hop QA. These benchmarks are mostly based on Wikipedia and fail to reflect the RAG demands and corresponding complexity in real-world scenarios. Some efforts have pushed evaluation boundaries, like CRUD-RAG's [59] operational metrics and DomainRAG's [86] domain-specific evaluations, but high costs and metric-task interdependencies limit progress. As a result, knowledge-intensive QA remains central for testing RAG robustness and practicality, highlighting a critical bottleneck: the need for innovative frameworks that balance retrieval flexibility and controlled generation to support new developments like Agentic RAG. 
Overall, many evaluation benchmarks are lagging behind rapid RAG+Reasoning advances, especially as LLMs grow more powerful. Specifically, the current evaluation of RAG faces the following challenges." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.45, + 0.482, + 0.556 + ], + "angle": 0, + "content": "Limited Challenge. With improving LLM capabilities, many knowledge-based questions are no longer difficult, as they can be answered without external retrieval. Current multi-hop reasoning datasets, often built from artificial templates, offer limited challenge. There is an urgent need for more complex datasets reflecting real-world scenarios and practical use." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.567, + 0.482, + 0.658 + ], + "angle": 0, + "content": "Lack of Specificity. Existing evaluation tasks are still predominantly focused on factual assessment and knowledge retrieval, lacking evaluations that probe deeper analytical thinking. This constraint limits the ability to measure a model's capacity for profound reasoning and cognitive depth." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.669, + 0.482, + 0.76 + ], + "angle": 0, + "content": "Task Uniformity. The majority of benchmarks are overly dependent on QA tasks, focusing on reactive, question-and-answer-based interactions. There is a pressing need to introduce tasks aligned with real-world applications, such as active information retrieval tasks based on personal knowledge or proactive knowledge discovery." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.771, + 0.482, + 0.907 + ], + "angle": 0, + "content": "Insufficient Dimensions. Evaluations are primarily end-to-end, focusing solely on final outcomes. However, with the introduction of reasoning processes, RAG+Reasoning systems have become iterative, multi-step frameworks. Current evaluations are unable to assess intermediate reasoning steps or retrieval chains effectively. 
The absence of step-by-step supervision data limits both research and training of related methods. Furthermore, current evaluation methodologies lack comprehensive assessments of system performance" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.092, + 0.912, + 0.122 + ], + "angle": 0, + "content": "trade-offs, such as computational cost and efficiency, which are critical for practical deployment." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.122, + 0.914, + 0.243 + ], + "angle": 0, + "content": "This emergent landscape necessitates the creation of a new generation of evaluation frameworks that can address these shortcomings. Such frameworks must not only ensure the adaptability of retrieval and the controllability of generation but also integrate intermediate reasoning evaluation and efficiency metrics, paving the way for the development of more robust and efficient RAG systems suited to diverse real-world applications." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.258, + 0.785, + 0.273 + ], + "angle": 0, + "content": "6.2 New Tasks on RAG+Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.277, + 0.915, + 0.488 + ], + "angle": 0, + "content": "Recently, combining RAG with reasoning has significantly improved models' ability to tackle more realistic and challenging tasks, raising the standards for evaluation methods. This subsection examines emerging tasks that assess their combined strengths, related tasks and datasets are shown in Table 2. Here, \"emerging\" refers not to entirely new tasks but to those with unprecedented complexity and demands. These include Deep Research tasks requiring multi-layered information integration and reasoning; PhD (Expert)-Level Complex Reasoning tasks targeting advanced scenario reasoning; and critical; domain-specific decision support tasks like medical diagnosis and legal analysis. 
Such tasks demand not only external knowledge retrieval but also logical consistency, coherence, and depth in reasoning." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.5, + 0.914, + 0.604 + ], + "angle": 0, + "content": "6.2.1 Deep Research. From the perspective of integrating RAG and reasoning, Deep Research tasks exemplify complex downstream applications. They require models to handle open-ended retrieval, produce long-form, structured text, and synthesize multi-source information through deep reasoning. This section analyzes their key features, evaluation datasets, and metrics." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.605, + 0.914, + 0.65 + ], + "angle": 0, + "content": "At the core of Deep Research tasks lies the mission of addressing complex informational queries. These tasks are distinguished by several key attributes:" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.65, + 0.914, + 0.74 + ], + "angle": 0, + "content": "First, dynamic interactivity is essential. Models engage in iterative dialogue to uncover latent user needs or \"unknown unknowns\". For example, the Co-Storm [43] framework enables collaboration with multiple language model agents to explore information gradually, easing user cognitive load and capturing unmet needs more accurately." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.741, + 0.914, + 0.816 + ], + "angle": 0, + "content": "Second, integrating information from multiple sources is crucial. Models must consolidate diverse data to provide comprehensive coverage. For instance, uses dynamic mind maps to structure knowledge and produce cohesive reports, ensuring accuracy and completeness." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.817, + 0.914, + 0.907 + ], + "angle": 0, + "content": "Third, expert-level accuracy is required. Many tasks demand domain expertise, expecting models to perform like human specialists. 
The Agentic Reasoning [92] framework illustrates this with high-stakes scenarios like medical treatment design or legal analysis, where outputs are judged on correctness, depth, and coherence." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.347, + 0.073 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.858, + 0.06, + 0.912, + 0.072 + ], + "angle": 0, + "content": "Gao et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.212, + 0.09, + 0.786, + 0.107 + ], + "angle": 0, + "content": "Table 2. Tasks and Datasets under the New Trend of RAG Combined with Reasoning" + }, + { + "type": "table", + "bbox": [ + 0.082, + 0.117, + 0.916, + 0.965 + ], + "angle": 0, + "content": "
Task TypeSub-TaskDatasetDescriptionScaleConstruction ByEvaluationPaper
Deep ResearchDeep ResearchAgentic ReasoningPHD-level dataset covering finance, medicine, and law.15-30 domainsPhD ExpertsExpert pass rate[92]
Report Genera-tionWildSeek [44]Info-seeking task-goal pairs for document generation.100 samplesRules/LLM/ManualLLM[98]
Report Genera-tionTELL ME A STORY [37]fiction writing evaluation dataset: detailed prompts and long-form narratives.230 samplesManualLLM[98]
Peer ReviewReview-5k [91]ICLR 2024 peer review dataset: paper metadata and structured reviewer feedback.4,991 papersOpenReview/arXivMSE/MAE/Acc[91]
Report Genera-tionResearch-14k [91]2022-2024 Accepted ML pa-pers: outlines, full texts, and cited abstracts.14,911 papersSemantic Scholar + arXivSimulated review scores[91]
Report Genera-tionSolutionBench [54]Engineering benchmark: constrained solutions across 8 real-world domains.1,050 datapointsManual/LLM ex-tractionAnalytical/ Tech-nical scores[54]
Mathematics & ReasoningMath ReasoningGPQA [67]PHD-level MCQs in physics, chemistry, and biology.744 setsPhD ExpertsAccuracy[92]
Math ReasoningMATH500 [55]500 math problems from the MATH test set.500 problemsPublic reposPass@K[51]
ProgrammingLiveCodeBench [40]Programming benchmark with easy, medium, and hard problems.1,055 problemsCompetition plat-formsPass@K[51]
ProgrammingUSACO [70]USA Computing Olympiad problems, testing algorithms and coding.307 problemsUSA Computing OlympiadPass@K[52]
Math ReasoningTheoremQA-Math [33]BRIGHT subset: theorem-based math problems.206 problemsSTEM datasetsAccuracy[52]
ProgrammingGorilla [64]API-aware code generation from HuggingFace, Torch Hub, TensorFlow Hub docs.1,600 APIsManualAST matching[73]
Math ReasoningOlympiadBench [29]Olympiad-level math compe-tition problems.1,000 problemsCompetitionsAccuracy/F1[109]
Complex Reason-ingComplexWebQA [76]Multi-step reasoning over web queries with cross-document integration.34,689 queriesWeb snippetsAccuracy[36]
Demanding RetrievalDomain RetrievalStackEcon & Stack-Bio [33]Biology and economics StackExchange questions for complex retrieval.206 queriesStackExchangenDCG@K[52]
Active RetrievalAR-Bench [14]Active retrieval benchmark with four sub-tasks.8k/sub-taskSyntheticAccuracy[14]
Real-timeTAQA [104]QA dataset with time-evolving answers.10K-100K rowsHuman-curatedLLM[14]
Real-timeFreshQA [80]Dynamic fact QA benchmark with evolving answers.600 samplesMixed sourcesLLM[14]
Domain RetrievalPubMed [42]PICO-based medical search dataset linking reviews to PubMed.21k+ samplesSystematic re-viewsRecall@K[42]
Domain RetrievalTrial search [42]PICO-based clinical trial search linked to ClinicalTrials.gov.7k+ samplesManuallyRecall@K[42]
Domain RetrievalFinSearchBench-24 [50]Financial retrieval benchmark covering stocks, rates, policy, trends.1,500 queriesManuallyAccuracy[50]
Decision & QABusinessDQA [48]Decision QA benchmark with business scenarios in enterprise settings.301 pairsvideo gamesAccuracy[48]
MedicalCMB-Clin [87]CMB subset for clinical diagnosis reasoning in Chinese medical cases.74 casesTextbooks/diagnosticLLM/Expert materials[11]
MedicalMM-Cases [11]Medicine cases generated by GPT-40-mini, verified by doctors.609 casesLLM/doctor-reviewedLLM/Expert[11]
MedicalTCM-Cases [11]TCM patient cases generated by GPT-40-mini, verified by doctors.130 casesLLM/doctor-reviewedLLM/Expert[11]
" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.086, + 0.061, + 0.38, + 0.072 + ], + "angle": 0, + "content": "Synergizing RAG and Reasoning: A Systematic Review" + }, + { + "type": "header", + "bbox": [ + 0.654, + 0.061, + 0.912, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.092, + 0.481, + 0.151 + ], + "angle": 0, + "content": "Fourth, multi-modal reasoning is often necessary. Deep Research tasks involve varied data types—text, code, knowledge graphs—and dynamic tool use such as web searches or code execution to enhance reasoning." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.153, + 0.481, + 0.241 + ], + "angle": 0, + "content": "Finally, handling multiple real-world constraints is vital. Tasks may require generating practical solutions under specific conditions, like designing hospitals in challenging environments with factors like heavy rainfall and seismic activity, as seen in the DeepSolution framework. This ensures outputs are feasible and relevant." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.243, + 0.481, + 0.287 + ], + "angle": 0, + "content": "To ensure the diversity and complexity of Deep Research tasks, their evaluation relies on datasets drawn from multiple domains. A few notable examples include:" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.288, + 0.482, + 0.438 + ], + "angle": 0, + "content": "WildSeek Dataset [44]: This dataset is constructed from real-world user information-seeking scenarios and comprises 100 data points covering 24 fields, including economics, computer science, and law. Each data point is characterized by a topic, user goal, and domain label. For example: \"Domain: Economics; Topic: Development of a Shared Trading Currency; Goal: Investigate how a new shared currency could eliminate transaction costs\". 
WildSeek effectively evaluates models' competence in dynamic interaction and multi-source information integration." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.439, + 0.482, + 0.665 + ], + "angle": 0, + "content": "GAIA [62]. The GAIA Benchmark, developed jointly by Meta AI, Hugging Face, and others, is a comprehensive evaluation framework designed to assess general AI assistants' ability to handle real-world problems. It features 466 carefully crafted tasks spanning language reasoning, visual perception, multi-agent collaboration, and adaptability, focusing on key skills like reasoning, multimodal processing, web browsing, and tool use. GAIA measures performance across dimensions such as task execution, adaptability, collaboration, generalization, and real-world reasoning with metrics like completion rate, response quality, efficiency, and robustness. Unlike traditional benchmarks, it emphasizes robustness and reliability in everyday scenarios, supports zero-shot evaluation, prevents data contamination, and is widely used in research and industry to guide AI development." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.666, + 0.481, + 0.801 + ], + "angle": 0, + "content": "SolutionBench [54]: This dataset spans eight engineering domains, including environmental, mining, and transportation engineering. Each instance presents a complex engineering problem with specific constraints. For example: \"Design a safe and efficient hospital construction plan in a region with 3000mm annual rainfall, expansive soils, and frequent seismic activity.\" SolutionBench evaluates models' ability to address multi-constraint problems and integrate specialized knowledge effectively." 
+ }, + { + "type": "text", + "bbox": [ + 0.088, + 0.802, + 0.481, + 0.891 + ], + "angle": 0, + "content": "The current evaluation system for DeepResearch faces the dual challenges of scarce specialized testing tasks and the difficulty of assessing complex, lengthy reports: On one hand, existing benchmark tests only cover basic capabilities and lack systematic evaluation standards in specialized scenarios like business analysis and policy assessment; on the" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.092, + 0.913, + 0.182 + ], + "angle": 0, + "content": "other hand, the multimodal integration, logical chain verification, and domain adaptability testing of long reports pose technical bottlenecks for traditional assessment methods, necessitating the development of new evaluation tools that integrate logic graphs, dynamic scenario simulation, and domain knowledge bases." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.183, + 0.913, + 0.318 + ], + "angle": 0, + "content": "In the future, the evaluation system will evolve into a multidimensional framework, including the construction of a three-level indicator matrix covering basic capabilities, reasoning levels, and application value. Overcoming these evaluation bottlenecks requires both technological innovation and joint standard-building efforts. This concerns not only the reliability validation of intelligent research tools but also the reshaping of research evaluation paradigms and industrial application boundaries." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.334, + 0.913, + 0.664 + ], + "angle": 0, + "content": "6.2.2 PhD (Expert)-Level Complex Reasoning. The integration of RAG with advanced reasoning has become essential for tackling expert-level, complex cognitive tasks, particularly at the PhD level. 
These tasks, including competitive programming, theorem-driven proof reasoning, and cross-disciplinary knowledge retrieval, require multi-layered logical inference and precise coordination between dynamic retrieval and domain-specific knowledge. PhD-level reasoning differs from standard evaluations across three dimensions: knowledge intensity, procedural rigor, and domain specificity. Knowledge intensity demands dynamic access to deep, specialized knowledge, such as analyzing dynamic programming time complexity or applying algebraic topology theorems—needs that surpass general corpora and call for domain-specific knowledge graphs and retrieval methods. Procedural rigor involves mathematical precision in multistep proofs, requiring logical consistency in symbolic manipulation, theorem use, and counterexample refutation, as seen in international math competitions. Domain specificity reflects tailored reasoning methods, e.g., handling synchronization in concurrent programming or employing tensor calculus in quantum field theory." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.666, + 0.913, + 0.831 + ], + "angle": 0, + "content": "Evaluation systems for such tasks are inherently multilayered and multimodal. The USACO Benchmark [71] offers a graduated difficulty scale for programming reasoning, testing both correctness and algorithmic constraints like time complexity. TheoremQA-Math [9] links formalized math problems to theorem libraries, demanding verifiable mappings between theorem applications and calculations. Cross-disciplinary datasets like StackBio and StackEcon [53] assess models' ability to extract critical knowledge from dense, domain-rich documents, serving as strong tests for domain-oriented retrieval accuracy." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.832, + 0.913, + 0.906 + ], + "angle": 0, + "content": "Modern evaluation surpasses traditional end-to-end tests by combining process and outcome validation. 
Frameworks like CR-Planner [52] use dual models—a Sub-Goal Critic to score reasoning chains and an Execution Critic to evaluate retrieval—allowing fine-grained step monitoring. For" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.086, + 0.061, + 0.345, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.859, + 0.061, + 0.912, + 0.071 + ], + "angle": 0, + "content": "Gao et al." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.092, + 0.482, + 0.226 + ], + "angle": 0, + "content": "example, in dynamic programming, key steps like formulating state transitions and retrieving boundary conditions receive targeted feedback. Similarly, Search-O1 [51] quantifies knowledge completeness by tracking uncertainty indicators (e.g., tentative language), measuring confidence and accuracy. Outcome validation maintains strict correctness benchmarks in programming and combines metrics like F1 scores with expert review in open-domain scientific QA to ensure precise understanding of domain-specific terms." + }, + { + "type": "title", + "bbox": [ + 0.088, + 0.24, + 0.371, + 0.254 + ], + "angle": 0, + "content": "6.3 Challenges and Future Directions" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.259, + 0.482, + 0.468 + ], + "angle": 0, + "content": "6.3.1 Complex Domain Tasks. Recent advances in RAG have provided novel solutions for more complex tasks in professional domains. These downstream tasks transcend the limitations of traditional question-answering models that rely solely on simple retrieval-generation patterns, involving challenges such as real-time information acquisition, integration of domain expertise, and dynamic decision-making support. 
The nature of these tasks can be characterized along three interrelated dimensions: (1) temporal dynamics, emphasizing the rapid changes in data and reasoning environment; (2) domain specificity, focusing on deep integration of industry knowledge and structured data; and (3) reasoning chain complexity, reflecting requirements for multi-stage reasoning and fine-grained decomposition of queries." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.47, + 0.493, + 0.755 + ], + "angle": 0, + "content": "To rigorously evaluate such systems, innovative benchmarking approaches have been proposed. The FinSearchBenchmark-24 dataset, for example, encompasses five months of market data variations, integrating multi-variable interactions across stock, policy, and industrial sectors, and includes over 1,500 multiple-choice questions, thereby surpassing the constraints of traditional static benchmarks. The evaluation adopts a hierarchical and quantitative methodology: the foundational level measures model accuracy and response latency; the intermediate layer assesses the temporal sensitivity of information relevance and the contribution of retrieval mechanisms to reasoning outcomes; and the advanced layer employs ablation studies to highlight performance variances under dynamic temporal decay. This multifaceted evaluation not only differentiates surface-level retrieval capabilities but also rigorously measures the synergy between reasoning quality and temporal context, furnishing theoretical and practical foundations for long-term stability and predictive accuracy in complex domain systems." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.757, + 0.482, + 0.906 + ], + "angle": 0, + "content": "Experimental findings further reveal that establishing long-term evaluation protocols with temporal weighting functions is indispensable for adapting to realistic dynamic environments. 
Nonlinear declines in decision accuracy, observed when extending relevance windows from 72 to 168 hours, emphasize the importance of factoring temporal decay into assessment frameworks. Future work should extend these evaluation protocols to high-stakes domains such as medical diagnostics and legal consultation, where the standardization of interpretability metrics will critically support" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.093, + 0.911, + 0.121 + ], + "angle": 0, + "content": "the evolution of RAG+ reasoning systems toward robust and trustworthy decision-assistance platforms." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.133, + 0.913, + 0.508 + ], + "angle": 0, + "content": "6.3.2 Decision Support and Active Retrieval. The expansion of RAG+Reasoning frameworks into specialized tasks has fostered two complementary research paradigms: decision optimization and active retrieval. In the decision optimization category, systems must leverage heterogeneous structured data, rule bases, and objective functions to formulate optimal strategies. Representative systems like PlanRAG formalize Decision Question Answering (Decision QA) tasks targeting enterprise-level scenarios including supply chain optimization, industrial resource allocation, and market price regulation. These tasks require planning multimodal reasoning paths where models iteratively retrieve data from relational and graph databases, integrate intricate business rules, and iteratively refine decision-making paths through replanning mechanisms. To evaluate such capabilities, the Decision QA (DQA) benchmark creates dual database versions (MySQL and Neo4j) derived from economic systems in strategy games, assessing cross-structured generalization. 
The evaluation consists of a three-tier framework: the core tier measures answer accuracy; the intermediate layer diagnoses error types to identify system bottlenecks; and the foundational tier focuses on retrieval efficiency and the impact of replanning frequency. This structured evaluation framework not only tracks performance but also offers actionable insights for system refinement." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.51, + 0.913, + 0.644 + ], + "angle": 0, + "content": "Conversely, the active retrieval evaluation addresses the challenge of dynamically determining when and how to invoke retrieval under complex multimodal contexts. Unlike rigid traditional RAG systems, UAR applies lightweight classifiers for fast, accurate triggers, improving performance in time-sensitive or creative tasks. Tested on AR-Bench, it combines binary trigger accuracy with GPT assessments, exact matches, and human reviews, boosting adaptability across diverse contexts." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.646, + 0.913, + 0.794 + ], + "angle": 0, + "content": "Emerging trends in these evaluation paradigms indicate a shift from static, rule-based frameworks to dynamic system simulations, as exemplified by DQA's use of game engine-generated datasets to simulate realistic environments. Similarly, active retrieval tasks progress from simple retrieval trigger decisions toward collaborative multi-criteria decision-making. Evaluation methodologies are concurrently evolving from singular performance metrics to multidimensional matrices comprising core effectiveness, diagnostic error distributions, and economic cost measures." + }, + { + "type": "title", + "bbox": [ + 0.519, + 0.812, + 0.671, + 0.826 + ], + "angle": 0, + "content": "7 Cost and Risk" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.833, + 0.912, + 0.906 + ], + "angle": 0, + "content": "Integrating reasoning into RAG systems is neither effortless nor purely beneficial. 
Recent trends have exaggerated its advantages while downplaying the costs and risks. This trade-off between performance and cost is crucial. This section examines the expenses and misuse risks linked to adding" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.382, + 0.073 + ], + "angle": 0, + "content": "Synergizing RAG and Reasoning: A Systematic Review" + }, + { + "type": "header", + "bbox": [ + 0.653, + 0.06, + 0.914, + 0.073 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "image", + "bbox": [ + 0.087, + 0.089, + 0.913, + 0.336 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.117, + 0.348, + 0.878, + 0.364 + ], + "angle": 0, + "content": "Figure 8. From LLM to RAG and then to RAG+Reasoning, performance improvement comes with additional cost." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.384, + 0.483, + 0.459 + ], + "angle": 0, + "content": "reasoning to RAG systems. As shown in Figure 8, the cost of moving from LLM to RAG, then to RAG + Reasoning, incurs an inevitable \"invisible tax\". Though often hidden by performance gains, this cost is vital in assessing these methods' overall practicality and efficiency." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.459, + 0.483, + 0.61 + ], + "angle": 0, + "content": "The shift from LLM to RAG moves from simplicity to enhanced knowledge handling by incorporating external information. A basic LLM provides direct, efficient answers with low latency and token use but is limited to pre-trained knowledge, restricting complex or up-to-date queries. RAG overcomes this by adding a vector database for external retrieval, vastly expanding response scope and reliability. However, this requires substantial data processing, storage, and introduces higher latency and token costs due to data chunking, encoding, indexing, and retrieval overhead." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.611, + 0.483, + 0.779 + ], + "angle": 0, + "content": "Advancing from RAG to RAG + Reasoning adds multistep reasoning capabilities, enabling complex task handling, autonomous decisions, and more context-aware responses through intricate reasoning. This comes at the expense of increased delays, token consumption, processing demands, and greater complexity in system integration and maintenance. The reasoning layer's autonomy also brings opaqueness, unpredictability, and heightened security and reliability risks. These challenges highlight the necessity of carefully balancing effectiveness against costs when adopting RAG + Reasoning in real-world applications." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.798, + 0.379, + 0.813 + ], + "angle": 0, + "content": "7.1 Cost Trade-off in RAG+Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.817, + 0.483, + 0.907 + ], + "angle": 0, + "content": "Figure 9 illustrates typical works combining RAG and Reasoning, showing retrieval and reasoning demands alongside token consumption. While integrating dynamic knowledge retrieval with multi-step reasoning greatly improves accuracy in more complex tasks, the resulting systemic costs are often underestimated in research and practice. These costs" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.384, + 0.916, + 0.52 + ], + "angle": 0, + "content": "grow non-linearly, causing serious efficiency bottlenecks in real-world use. The tradeoff between effectiveness and efficiency stems from RAG+Reasoning's architecture: multistage task decoupling, dynamic path planning, and intermediate state preservation. These features improve reasoning quality but trigger cascading increases in computational resources, token usage, and reduced retrieval efficiency. This section explores these implicit tradeoffs from the angles of resource use, token consumption, and retrieval efficiency." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.532, + 0.918, + 0.804 + ], + "angle": 0, + "content": "7.1.1 Non-Linear Growth of Computational Resources. The RAG+Reasoning framework separates retrieval and reasoning into multiple stages, causing computational demands to grow non-linearly. Dynamic chain-of-reasoning methods execute multiple LLM generations and retrievals per inference, resulting in complexity far exceeding baseline models. Fixed-length reasoning chains trigger repeated retrieval and generation calls, increasing resource needs with task complexity. More advanced techniques like MCTS-guided methods add rounds of candidate path generation and evaluation, further multiplying runtime and memory usage on GPUs compared to linear methods. Even simpler multi-step planning tasks incur much higher overhead than single-stage retrieval models due to extra graph construction and analysis. While this resource intensity improves inference accuracy, it poses serious scalability challenges under limited resources as computational costs grow superlinearly with model size, retrieval chain length, and task complexity." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.817, + 0.93, + 0.907 + ], + "angle": 0, + "content": "7.1.2 Implicit Token Inflation. Multi-step reasoning frameworks inherently cause significant token inflation through iterative intermediate processes like thought chains, retrieved documents, and verification feedback. Active learning setups consolidate multiple intermediate results—retrieved documents, counterfactuals, multi-round validations—leading to" + }, + { + "type": "list", + "bbox": [ + 0.513, + 0.532, + 0.93, + 0.907 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.347, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.858, + 0.06, + 0.912, + 0.071 + ], + "angle": 0, + "content": "Gao et al." 
+ }, + { + "type": "image", + "bbox": [ + 0.086, + 0.089, + 0.916, + 0.481 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.25, + 0.493, + 0.746, + 0.509 + ], + "angle": 0, + "content": "Figure 9. Cost quadrant diagram of retrieval and reasoning requirements" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.529, + 0.484, + 0.681 + ], + "angle": 0, + "content": "token usage well beyond typical limits. Chain-based retrieval also generates token bloat due to exhaustive candidate path exploration. Iterative reasoning path selection, expansion, and evaluation add heavy token overhead in tasks needing deep reasoning chains involving extensive sequence generation and evaluation. Token usage grows exponentially with task complexity and increases further when intermediate reasoning favors depth or breadth. This inflation raises API costs and memory demands, especially in long-text generation like Deep Research [106]." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.696, + 0.487, + 0.907 + ], + "angle": 0, + "content": "7.1.3 Marginal Decline in Retrieval Efficiency. Dynamic retrieval improves knowledge precision but suffers diminishing efficiency as task complexity increases. Adaptive methods reduce retrievals for simple tasks but still require multiple iterations for complex ones, adding significant overhead compared to standard RAG. The tradeoff between retrieval quality and frequency further limits efficiency. High-accuracy retrieval methods incur heavy computational and time costs, negating their efficiency benefits. Even advanced retrieval-trigger optimizations can't fully remove this overhead due to extra training and deployment costs [41]. This natural efficiency ceiling highlights ongoing challenges in balancing retrieval accuracy and resource use, especially in large, complex tasks." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.529, + 0.916, + 0.786 + ], + "angle": 0, + "content": "7.1.4 Toward a Cost Model Framework. Against this backdrop, the development of fine-grained cost models becomes a necessary precondition for balancing effectiveness and efficiency. Existing evaluation metrics, which often rely on single-task performance indicators (such as Exact Match or F1) or coarse-grained runtime statistics, lack the comprehensiveness to jointly model computational resources, token flow, and retrieval overhead. Consequently, they fail to quantify the true tradeoffs in reasoning mechanisms. For instance, while multi-hop reasoning may improve task accuracy, these improvements are frequently offset by exponential growth in token consumption and latency relative to baseline methods. A fine-grained cost model would enable researchers and practitioners to more accurately evaluate the real benefits of reasoning-centric frameworks while addressing the underexplored interplay between computational cost and task performance." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.798, + 0.798, + 0.813 + ], + "angle": 0, + "content": "7.2 Potential Risk of Over-Thinking" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.816, + 0.916, + 0.907 + ], + "angle": 0, + "content": "In the process of developing deep thinking models, \"overthinking\" poses a key risk to system efficiency and reliability [10, 15, 19, 30, 74, 81], and this issue is further amplified after combining with RAG. 
It appears as redundant reasoning steps, excessive validation of known conclusions, or unnecessarily broad retrieval scopes, wasting computational" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.086, + 0.061, + 0.38, + 0.072 + ], + "angle": 0, + "content": "Synergizing RAG and Reasoning: A Systematic Review" + }, + { + "type": "header", + "bbox": [ + 0.654, + 0.061, + 0.912, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.092, + 0.482, + 0.287 + ], + "angle": 0, + "content": "resources, increasing error propagation, and degrading performance. For example, in financial risk assessment, an LLM with RAG might retrieve multiple similar market reports and repeatedly verify the same economic indicators rather than focusing on core risks, leading to delayed decisions. This stems from an imbalance between reasoning and retrieval: after accessing external knowledge, the model can enter a \"self-validation loop,\" repeatedly parsing overlapping or contradictory documents. The generation module, seeking reliability, may trigger further retrievals, creating a feedback loop that worsens inefficiency. This issue is critical in real-time systems like medical diagnosis, where over-retrieval of irrelevant literature can delay urgent decisions." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.288, + 0.482, + 0.469 + ], + "angle": 0, + "content": "Case studies show the impact of overthinking [74]. In legal document interpretation, early reasoning errors can amplify through the retrieval-generation loop, causing retrieval along incorrect paths and yielding illogical conclusions. This error propagation is evident in systems like the Search-o1 [51], where flawed information extraction misguides subsequent reasoning. 
In industrial equipment manual interpretation, overextended reasoning with highly similar documents risks obscuring critical parameter differences, increasing procedural errors. These examples illustrate that overthinking not only hampers knowledge integration but also creates safety hazards in practical applications." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.47, + 0.501, + 0.68 + ], + "angle": 0, + "content": "To mitigate these risks, researchers propose multiple optimization frameworks. ReaRAG [49] limits reasoning chain length and incorporates self-reflection to prune invalid branches. A simple and effective way is to use a two-stage filtering process, first narrowing documents by metadata, then validating fragment relevance, reducing redundant information—for instance, retrieving only relevant legal clauses rather than entire regulatory texts. The DeepSeek R1 [26] applies reinforcement learning with distillation to penalize redundant steps, cutting repeated formula validation in math proofs by over \\(40\\%\\). These approaches transform open-ended reasoning into controlled, goal-directed processes, using methods like attention weight analysis to measure information gain or confidence functions to evaluate reasoning paths." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.681, + 0.482, + 0.875 + ], + "angle": 0, + "content": "Current research balances constraints with model creativity. Knowledge graph-guided reasoning is tested in clinical trials to prioritize key medical features over exhaustive literature retrieval [11]. Causal reasoning models aim to break error chains; for example, in financial forecasting, causal graphs restrict reasoning to logically relevant macroeconomic links. Adaptive stopping strategies adjust reasoning depth in customer service—simple queries use preset templates, complex issues activate multi-hop reasoning. 
These advances reshape retrieval-augmented reasoning, with the core challenge being to develop evaluation frameworks that avoid both \"cognitive stagnation\" from excessive constraints and \"cognitive overload\" from insufficient control." + }, + { + "type": "text", + "bbox": [ + 0.088, + 0.877, + 0.512, + 0.907 + ], + "angle": 0, + "content": "Future progress will integrate cognitive science with computational modeling. By mimicking human \"intuition-verification\"" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.092, + 0.913, + 0.197 + ], + "angle": 0, + "content": "decision-making, LLMs could switch seamlessly between rapid response and deep reasoning. In high-risk fields like industrial fault diagnosis, such hybrid models can quickly propose contingency plans after initial retrieval while verifying their validity through deeper analysis. This layered approach reduces overthinking risks and offers a safe, controllable path for applying LLMs in critical industries." + }, + { + "type": "title", + "bbox": [ + 0.519, + 0.212, + 0.684, + 0.227 + ], + "angle": 0, + "content": "8 Practical Guide" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.232, + 0.913, + 0.412 + ], + "angle": 0, + "content": "The combination of RAG and Reasoning is not a one-size-fits-all solution; it requires careful evaluation of each scenario's unique needs. As a rapidly evolving and relatively new field, practical applications are still limited, making best practices hard to define. This chapter abstracts and summarizes the key traits of typical RAG+Reasoning application domains and offers practical guidelines for system design based on these features. It provides recommendations on leveraging RAG's strengths with Reasoning, highlighting priorities, pitfalls to avoid, and current opportunities (Figure 10). The goal is to promote wider adoption and effective use of this technology in diverse, complex real-world settings." 
+ }, + { + "type": "title", + "bbox": [ + 0.519, + 0.427, + 0.726, + 0.44 + ], + "angle": 0, + "content": "8.1 Domain characteristics" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.445, + 0.913, + 0.837 + ], + "angle": 0, + "content": "As illustrated in the left part of Figure 10, we develop a seven-dimensional feature system based on the three core stages of RAG—query, retrieval, and generation—to systematically analyze challenges and adaptation needs across various industries. The query stage emphasizes the complexity of intent understanding and the demand for advanced reasoning, recognizing that industries differ in query abstraction and specificity; some require quickly capturing implicit, deep intentions, while others need complex reasoning. Effective preservation of original semantic meaning during understanding and reasoning is key to improving RAG performance. Retrieval focuses on the system's adaptability to diverse and dynamic knowledge sources, which vary from rich multi-domain data to rapidly updating information; frequent updates and fragmented knowledge present challenges that demand effective integration to ensure consistent support for generation. The generation stage requires high-quality outputs, with strict control over hallucinations—especially critical in sensitive fields like healthcare and law—along with varying latency requirements for real-time or delayed responses. Explainability and traceability at this stage are essential for system credibility and serve as key evaluation metrics. This comprehensive framework reveals technical bottlenecks and guides improvements, and is applied to analyze four representative domains: finance, healthcare, law, and personal assistants." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.847, + 0.913, + 0.907 + ], + "angle": 0, + "content": "8.1.1 Finance. In the finance domain, user queries typically focus on structured needs like investment decisions and risk forecasting. 
While intent understanding is moderately complex, the system must perform advanced reasoning amid" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.347, + 0.073 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.858, + 0.06, + 0.912, + 0.072 + ], + "angle": 0, + "content": "Gao et al." + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.088, + 0.913, + 0.346 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.29, + 0.361, + 0.709, + 0.377 + ], + "angle": 0, + "content": "Figure 10. Practical guide to synergizing RAG and Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.397, + 0.484, + 0.609 + ], + "angle": 0, + "content": "rapidly changing market conditions, relying heavily on external knowledge and frequent updates. For example, portfolio return forecasting integrates time series analysis, policy interpretation, and cross-market reasoning. Retrieval demands handling diverse data sources—real-time market data, annual reports, and regulatory filings—with update cycles often measured in minutes. During generation, strict latency and hallucination control are crucial, as outputs must include decision-making suggestions with full data traceability. Investment research reports, for instance, require annotated key indicators, their data sources, and computation logic to ensure transparency and regulatory compliance. High latency control and robust traceability are essential to maintain transparency and adherence to financial regulations." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.621, + 0.49, + 0.788 + ], + "angle": 0, + "content": "8.1.2 Healthcare. Healthcare queries involve complex medical semantic parsing, often with ambiguous terms or incomplete symptoms. For example, \"persistent chest pain with shortness of breath\" requires multi-hop reasoning across cardiology, pulmonology, and emergency medicine. 
Retrieval must integrate electronic health records, medical imaging, and up-to-date clinical guidelines. In generation, hallucination tolerance is minimal—errors in drug dosages or protocols risk malpractice. Therefore, accuracy, timeliness, and explainability are paramount, with every decision step traceable and verifiable." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.801, + 0.484, + 0.907 + ], + "angle": 0, + "content": "8.1.3 Legal Services. Legal consultations often require interpreting statutes and citing cases, balancing precise legal terms with natural language nuances. Retrieval depends on structured, infrequently updated sources like case law databases and local regulations. Generation demands accuracy—for instance, drafting contract clauses must precisely cite specific statutes (e.g., Article 472 of the Civil Code) down" + }, + { + "type": "list", + "bbox": [ + 0.082, + 0.621, + 0.49, + 0.907 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.396, + 0.916, + 0.456 + ], + "angle": 0, + "content": "to the paragraph level for traceability. Explainability is essential, with traceability usually above \\(95\\%\\), and probabilistic language avoided to comply with strict judicial documentation standards." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.474, + 0.916, + 0.701 + ], + "angle": 0, + "content": "8.1.4 Personal Assistants. This domain features diverse, dynamic user needs, including schedule management, real-time navigation, and open-domain conversations. Accurate intent disambiguation through contextual awareness is crucial. Retrieval integrates fragmented sources like user behavior logs, geolocation, and social media. Generation latency varies: weather updates require sub-second responses, while travel planning can tolerate \\(5+\\) seconds. Hallucination tolerance depends on context—creative outputs are acceptable for recipes but not for flight information, which demands full accuracy. 
This necessitates adaptive verification in the RAG system. Though intent complexity is lower than in healthcare or legal fields, the domain's interaction diversity requires heavy reliance on external knowledge and dynamic balancing of latency and accuracy." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.721, + 0.673, + 0.735 + ], + "angle": 0, + "content": "8.2 Do's and Don'ts" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.739, + 0.916, + 0.815 + ], + "angle": 0, + "content": "Building on aforementioned domain characteristics, we further identify six common scenarios, and derive technical adaptation principles for each. This section outlines key optimization strategies (Do's) and prohibitions (Don'ts), to guide the co-design of RAG and reasoning." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.832, + 0.916, + 0.908 + ], + "angle": 0, + "content": "8.2.1 Structured Reasoning Scenarios. For scenarios requiring multi-step logical decomposition and structured knowledge dependency, such as portfolio return prediction, Chain-of-Thought (CoT) task decomposition and knowledge graph (KG)-driven graph reasoning approaches should be" + }, + { + "type": "list", + "bbox": [ + 0.513, + 0.739, + 0.916, + 0.908 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.086, + 0.061, + 0.381, + 0.072 + ], + "angle": 0, + "content": "Synergizing RAG and Reasoning: A Systematic Review" + }, + { + "type": "header", + "bbox": [ + 0.654, + 0.061, + 0.912, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.092, + 0.482, + 0.317 + ], + "angle": 0, + "content": "employed. Complex problems should be broken into verifiable sub-tasks, such as coupling market trend analysis with policy impact assessment, while leveraging knowledge graph constraints to ensure logical completeness and auditability. 
It is essential to incorporate a temporal validation layer to cross-check the consistency of timestamp-sensitive information (e.g., real-time market data or emergent regulatory policies) within a dynamic knowledge base. Approaches that exclude retrieval-based verification of salient features must be avoided, as they may lead to reasoning biases arising from the absence of structured knowledge anchors (e.g., critical indicators from financial statements). Furthermore, the reasoning space of LLMs should be constrained within domain-specific knowledge frameworks to prevent irrelevant or invalid deductions." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.326, + 0.483, + 0.642 + ], + "angle": 0, + "content": "8.2.2 Dynamic Demand-Responsive Scenarios. For scenarios characterized by rapidly shifting demands and user preference variability, such as itinerary planning and multimodal interaction in personal assistant services, a dynamic adaptation mechanism based on prompt engineering is recommended. By dynamically associating fragmented knowledge units (e.g., user behavior history and real-time traffic updates) with semantic templates and employing heuristic rules for search-space pruning (e.g., prioritizing locally updated information within the past 24 hours), the system can balance contextual adaptability with response speed. Model fine-tuning or reinforcement learning (RLHF/DPO)-based strategy updates should be avoided due to their lengthy iterative cycles and computational overhead, which cannot meet real-time responsiveness requirements, such as millisecond-grade reaction times for last-minute destination changes. Lightweight caching architectures should be implemented within the retrieval system, prioritizing frequently accessed knowledge fragments, such as operating hours of popular tourist attractions, to achieve an equilibrium between dynamism and stability." 
+ }, + { + "type": "text", + "bbox": [ + 0.087, + 0.651, + 0.483, + 0.906 + ], + "angle": 0, + "content": "8.2.3 Deterministic Decision-Making Scenarios. In scenarios requiring a single, reliable conclusion, such as clinical diagnosis generation in the healthcare domain, a multi-level deterministic assurance system should be established. Time-validation layers can filter outdated knowledge (e.g., therapies no longer approved), while field-sensitive retrieval modules trigger predefined decision rules conforming to up-to-date clinical guidelines (e.g., those codified within the latest version of the International Classification of Diseases [ICD]). Knowledge graph path constraints should restrict the reasoning process to validated causal links within medical logic (e.g., linking symptom patterns to laboratory test results within corroborated diagnostic pathways), thereby minimizing the likelihood of deviations from standard protocols. Probabilistic exploration strategies that generate alternative hypotheses (e.g., speculative differential diagnoses for atypical pneumonia) should be strictly disallowed to avoid clinical" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.093, + 0.913, + 0.152 + ], + "angle": 0, + "content": "misjudgments. Additionally, delegating decision-making authority to external classification models must be avoided to maintain end-to-end explainability and a clear causal link in the decision-making pipeline." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.162, + 0.913, + 0.448 + ], + "angle": 0, + "content": "8.2.4 Time-Sensitive Scenarios. In tasks highly sensitive to response delays, such as real-time risk warnings and trading decisions in the financial sector, heuristic rules should be employed to prioritize indexing of frequently queried knowledge units (e.g., volatility indices and liquidity indicators) at the top of the search hierarchy. 
Directed retrieval expansion strategies that preload potentially associated information (e.g., contractual clauses of derivative instruments tied to underlying assets) can further reduce latency in multi-turn interactions. Monte Carlo Tree Search (MCTS) and other sample-based algorithms are ill-suited for such scenarios due to the excessive computational complexity caused by branch expansion, rendering them infeasible within tight time constraints (e.g., milliseconds). Similarly, the invocation of complex mathematical solvers (e.g., numerical solutions for stochastic differential equations) can introduce uncontrollable delays and should be replaced with lightweight rule-based mechanisms (e.g., threshold-triggering mechanisms based on historical volatility ranges)." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.46, + 0.913, + 0.79 + ], + "angle": 0, + "content": "8.2.5 Risk-Sensitive Scenarios. For scenarios with minimal tolerance for errors, such as contract clause generation and citation of judicial interpretations in the legal sector, a dual-layer defensive mechanism must be employed. A pre-action review layer should validate the compliance of generated content with statutory standards (e.g., ensuring consistency between liability clauses and Article 577 of the Civil Code), while a reliability validation layer performs cross-referencing validation across multiple sources (e.g., aligning Supreme Court precedents with regional court guidelines) to resolve potential conflicts. Retrieval systems must include version control modules to track and update legal references (e.g., automatically flagging repealed local statutes). Unconstrained reinforcement learning-based text generation methods must be avoided, as their exploratory nature risks violating the normative requirements of legal documents (e.g., generating presumptive liability terms unsupported by judicial interpretations). 
All decision-making actions must pass through deterministic rule engines to filter inadmissible outputs, and the system should never execute decision actions autonomously, such as generating legally binding arbitration notices without oversight." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.802, + 0.913, + 0.906 + ], + "angle": 0, + "content": "8.2.6 Complex Path Exploration Scenarios. In exploration tasks involving multiple possible trajectories, such as differential diagnosis and therapeutic pathway optimization in medicine, weighted ranking search algorithms should balance search depth and breadth. Knowledge graph topology can guide prioritization (e.g., standard treatment procedures for acute coronary syndrome), while Monte Carlo Tree" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.086, + 0.06, + 0.347, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.859, + 0.06, + 0.912, + 0.071 + ], + "angle": 0, + "content": "Gao et al." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.092, + 0.483, + 0.289 + ], + "angle": 0, + "content": "Search can extend exploration into uncommon differential paths (e.g., rare genetic metabolic disorders). Dynamic pruning threshold functions should be designed (e.g., adjusting the scope of differential diagnosis based on patient history) to eliminate low-confidence hypotheses in real time, thereby controlling computational scale. Brute-force searching of all potential paths (e.g., concurrently testing hundreds of pathogens for nonspecific symptoms) should be avoided to prevent exponential computational scaling. Careful handling of specific token triggers during retrieval (e.g., avoiding spurious associations between \"fever\" and unrelated oncological hyperthermia research) is critical to maintaining logical coherence in diagnostic reasoning." 
+ }, + { + "type": "title", + "bbox": [ + 0.086, + 0.306, + 0.27, + 0.321 + ], + "angle": 0, + "content": "8.3 Opportunity Points" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.323, + 0.483, + 0.43 + ], + "angle": 0, + "content": "Based on the Do's and Don'ts of current technologies analyzed in the previous section, there remain numerous directions with substantial academic value and application potential that have yet to be fully explored. This section systematically discusses several promising opportunity points across three dimensions: data and indexing, models and methodologies, and application services." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.444, + 0.277, + 0.458 + ], + "angle": 0, + "content": "8.3.1 Data and Indexing." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.471, + 0.483, + 0.621 + ], + "angle": 0, + "content": "Cold-Hot Tiered Indexing and Dynamic Context Management. The challenge of managing massive and highly heterogeneous data resources lies in devising an effective cold-hot tiered indexing mechanism that prioritizes data according to their frequency of use and importance. Such a mechanism not only demands classification of data based on timeliness and access frequency but also requires integration with dynamic context management. This allows the system to intelligently retrieve the most relevant data according to the immediate context." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.622, + 0.483, + 0.759 + ], + "angle": 0, + "content": "Moreover, a dynamically updated indexing mechanism can mitigate the loss of data timeliness, which often leads to deteriorated inference accuracy. By ensuring access to the most recent and task-appropriate data, this approach reduces redundancy and incorrect retrievals associated with static indexing. 
When combined with automated task scheduling and resource allocation strategies, fine-grained real-time inference support can be achieved, significantly enhancing the system's overall efficiency." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.771, + 0.483, + 0.907 + ], + "angle": 0, + "content": "Cross-Institution Knowledge Base Construction. The construction of cross-institution or cross-domain knowledge bases offers new opportunities for advancing RAG+Reasoning research. At the core of large-scale cross-institutional knowledge bases lies the optimization of data integration and sharing mechanisms. This entails addressing challenges such as data security and privacy while adopting standardized data interfaces or leveraging federated learning paradigms to enable multidimensional data integration." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.915, + 0.198 + ], + "angle": 0, + "content": "Through semantic alignment across multiple sources, entity resolution, and concept abstraction, cross-institutional knowledge can be transformed into authoritative and richly contextualized knowledge bases. These enhanced repositories provide robust contextual support for reasoning tasks and can deliver deeper insights in areas such as healthcare, finance, and urban management." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.209, + 0.927, + 0.345 + ], + "angle": 0, + "content": "Fine-Grained Layering and Confidence Grading. In scenarios where retrieval and reasoning operate synchronously, the interpretability and reliability of generated outcomes are paramount. Fine-grained layering of data and indices, along with confidence grading of retrieval results, enables the system to selectively use the most trustworthy and relevant subsets of data during different stages of reasoning. This approach fosters transparency and traceability in final decisions or generative outputs." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.346, + 0.915, + 0.451 + ], + "angle": 0, + "content": "For instance, in medical diagnosis scenarios, confidence grading can initiate additional verification or expert review in high-risk cases. In the legal domain, confidence layering systematically presents key evidence and identifies sources of uncertainty, reducing reasoning vulnerabilities and minimizing the risk of erroneous conclusions caused by information ambiguity." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.462, + 0.771, + 0.477 + ], + "angle": 0, + "content": "8.3.2 Models and Methodologies." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.488, + 0.922, + 0.685 + ], + "angle": 0, + "content": "Event-Driven Active Retrieval. Traditional retrieval mechanisms are predominantly passive. However, event-driven active retrieval presents a promising exploration avenue. By monitoring critical events, such as the injection of new data, user interactions, or changes in external sensors, event-triggered retrieval and reasoning processes can be initiated to capture and respond to potential risks and opportunities in real time. Integrating methodologies such as sequence-based event detection or multitask-learning-based intent recognition can facilitate automatic determination of when and how to trigger retrieval actions. Iteratively optimizing these processes contributes to a more efficient and continuous reasoning loop." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.696, + 0.92, + 0.832 + ], + "angle": 0, + "content": "Spatiotemporal-Aware Retrieval and Association. Many applications, such as natural disaster monitoring, traffic flow prediction, and inventory management in retail, exhibit strong dependencies on temporal and spatial dimensions. By incorporating spatiotemporal-aware algorithms, retrieval processes can prioritize or emphasize crucial documents according to constraints tied to time and space. 
This not only enhances timeliness but also improves the purposefulness and accuracy of reasoning." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.832, + 0.915, + 0.908 + ], + "angle": 0, + "content": "Furthermore, modeling the evolution of events within spatiotemporal dimensions—when combined with semantic indexing and vector-based retrieval mechanisms in RAG—can enable more precise characterization and utilization of complex spatiotemporal dynamics during reasoning." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.086, + 0.06, + 0.381, + 0.072 + ], + "angle": 0, + "content": "Synergizing RAG and Reasoning: A Systematic Review" + }, + { + "type": "header", + "bbox": [ + 0.654, + 0.06, + 0.912, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.092, + 0.482, + 0.227 + ], + "angle": 0, + "content": "Multimodal Fusion in Retrieval and Reasoning. Multimodal data (e.g., text, images, audio, video, and sensor data) collectively constitute a richer contextual environment, offering critical cues for reasoning tasks. However, existing studies are often limited to the retrieval of single or a few data modalities. Advancing research on multimodal fusion and reasoning mechanisms under the RAG+Reasoning framework has the potential to greatly enhance the system's capacity for addressing complex queries." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.228, + 0.482, + 0.393 + ], + "angle": 0, + "content": "The research focus lies in constructing cross-modal representation learning and alignment methods, enabling unified representations of the same entities or events across different modalities. During retrieval, confidence scores for each modality can be integrated into a comprehensive ranking process, culminating in multimodal-informed joint decision-making during reasoning. 
This approach not only improves contextual understanding in complex tasks but also broadens the application scope of RAG technologies in scenarios such as expert systems and autonomous driving, where sensory integration and interpretation are critical." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.41, + 0.482, + 0.589 + ], + "angle": 0, + "content": "Dynamic Risk Propagation Modeling and Management. The tight coupling of retrieval and reasoning with multi-stage decision-making inevitably introduces risk propagation issues. Misjudgments of high-risk or low-confidence documents during upstream retrieval are often inherited by downstream reasoning processes, amplifying uncertainties and increasing error margins. To address this, dynamic risk modeling should be embedded within retrieval workflows, enabling risk quantification, tracking, and management at multiple stages. When necessary, risk mitigation mechanisms or process rollbacks can be triggered, creating a closed-loop correction framework." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.591, + 0.482, + 0.725 + ], + "angle": 0, + "content": "Incorporating strategies for analyzing and managing risk propagation is not only a technical challenge but also a matter of system deployment and standardization. In high-stakes domains such as healthcare and financial risk management, establishing comprehensive safety standards and compliance protocols will be crucial. These protocols should treat dynamic risk propagation management as a critical component of evaluating and iterating knowledge retrieval and reasoning systems." + }, + { + "type": "title", + "bbox": [ + 0.087, + 0.743, + 0.292, + 0.756 + ], + "angle": 0, + "content": "8.3.3 Application Services." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.772, + 0.482, + 0.906 + ], + "angle": 0, + "content": "Validation of Logical Chain Completeness. 
While RAG with Reasoning can provide partially interpretable reasoning outputs, verifying the completeness of logical chains remains a challenge. Future research could integrate formal verification or symbolic reasoning techniques to ensure consistency and completeness across key reasoning nodes and intermediate conclusions. This would prevent logical gaps or illogical leaps in reasoning, offering robust regulatory support for high-stakes industries such as law and finance." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.092, + 0.913, + 0.227 + ], + "angle": 0, + "content": "Intervenable Generation During Reasoning. Contemporary Agentic RAG often operate as \"black boxes,\" rendering external interventions nearly impossible during generative reasoning tasks. However, providing mechanisms for human intervention—such as through visualization or interactive interfaces—could enable experts or users to perform manual corrections, initialize prior knowledge, or modify interim assumptions during the reasoning process. This would substantially enhance the system's flexibility and safety." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.228, + 0.913, + 0.347 + ], + "angle": 0, + "content": "Specifically, intervenable generation allows not only post hoc error corrections but also proactive identification and rectification of potential risks or biases at earlier stages. Interactive interpretable reasoning platforms or visualization tools grounded in knowledge graphs could empower users to scrutinize and influence reasoning workflows, thereby enhancing confidence and control in decision-making processes across diverse domains." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.357, + 0.913, + 0.505 + ], + "angle": 0, + "content": "Risk Decision Interception Firewalls. In closed-loop automated tasks such as algorithmic trading or medical diagnostic decision-making, erroneous reasoning outputs can lead to catastrophic outcomes. 
To mitigate such risks, the system architecture should incorporate risk decision interception firewalls, which perform multidimensional validations at critical reasoning nodes or prior to outputting decisions. When confidence levels or high-risk indicators breach thresholds, these firewalls can block decision outputs or escalate them for stricter human review." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.507, + 0.913, + 0.582 + ], + "angle": 0, + "content": "This mechanism serves as a \"final line of defense\" for RAG+Reasoning systems, ensuring decision security in large-scale automated information networks. It also provides a robust foundation for compliance and regulatory auditing, enabling safer deployment in critical applications." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.591, + 0.913, + 0.71 + ], + "angle": 0, + "content": "Edge-Cloud Collaborative Retrieval and Reasoning. With the rapid development of IoT and 5G technologies, many scenarios demand on-site data collection and preliminary processing on edge devices, followed by high-level retrieval and reasoning tasks on cloud platforms. Efficiently partitioning tasks, allocating resources, and maintaining consistency between indexes and models across the edge-cloud continuum represent critical research directions." + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.711, + 0.913, + 0.831 + ], + "angle": 0, + "content": "Leveraging techniques such as lightweight model compression, distributed index synchronization, and communication optimization can ensure fast reasoning while maximizing resource utilization. Edge-cloud collaborative solutions are particularly impactful for real-time industrial monitoring and smart city applications, reducing network latency and bandwidth bottlenecks while ensuring accurate and timely inference outputs." 
+ }, + { + "type": "text", + "bbox": [ + 0.519, + 0.833, + 0.913, + 0.905 + ], + "angle": 0, + "content": "In summary, RAG+Reasoning systems present many untapped opportunities across various dimensions. Further research and practical validation could greatly improve their use in complex, high-risk scenarios while fueling new growth in GenAI." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.086, + 0.06, + 0.347, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.859, + 0.06, + 0.912, + 0.071 + ], + "angle": 0, + "content": "Gao et al." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.091, + 0.243, + 0.105 + ], + "angle": 0, + "content": "9 Future Trends" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.111, + 0.482, + 0.156 + ], + "angle": 0, + "content": "In this chapter, we summarize four major trends in technological advancements based on current research, aiming to elucidate and guide the potential future directions of RAG." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.179, + 0.382, + 0.194 + ], + "angle": 0, + "content": "9.1 The Integration of RAG and Graph" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.198, + 0.482, + 0.302 + ], + "angle": 0, + "content": "Recent developments have witnessed a growing synergy between RAG systems and graph-based approaches. The intrinsic benefits of graph structures, such as explicit logical relationships and knowledge indexing, have enabled new paradigms for addressing challenges in global reasoning, dynamic data management, and personalized services within RAG systems." + }, + { + "type": "title", + "bbox": [ + 0.103, + 0.304, + 0.292, + 0.318 + ], + "angle": 0, + "content": "Knowledge Organization." 
+ }, + { + "type": "text", + "bbox": [ + 0.087, + 0.319, + 0.483, + 0.665 + ], + "angle": 0, + "content": "Graph-structured knowledge organization frameworks offer a powerful alternative to traditional vector-based retrieval methods, excelling in modeling complex relationships and supporting global reasoning. For example, GraphRAG [18] combines hierarchical graph indexing with community detection to extract entity relationship networks from text corpora, enabling large-scale thematic analysis through hierarchical summaries. Building on this, PIKE [82] introduces a multi-level heterogeneous knowledge graph that organizes documents, semantic segments, and refined knowledge units into a three-layer hierarchy, improving extraction accuracy and multi-hop reasoning via atomized knowledge construction and task decomposition. For dynamic personalization, EMG-RAG [89] features a three-layer Editable Memory Graph architecture that structures memory data by ontology classification, subclass, and entity relationships, using reinforcement learning to enable real-time updates and multidimensional queries. Together, these advances leverage graph topologies to address the limitations of conventional RAG systems—such as one-dimensional representation and weak contextual links—enabling multilevel reasoning from local fact retrieval to global thematic summarization and forming a foundation for interpretable, adaptive RAG systems." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.666, + 0.483, + 0.876 + ], + "angle": 0, + "content": "Symbolic Reasoning. Graph-structured symbolic reasoning methods leverage the multi-hop reasoning power of Knowledge Graphs (KG) to better manage complex semantic and logical relationships. Frameworks like HippoRAG2 and the Think-on-Graph (ToG) [60] series exemplify this. 
HippoRAG2 [28] builds open knowledge graphs and uses personalized PageRank with a dense-sparse coding approach inspired by brain memory, boosting performance in factual memory, semantic understanding, and multi-hop reasoning. Likewise, ToG-2 combines iterative retrieval of knowledge graphs and documents, using relationship discovery, entity pruning, and context-driven graph searches to integrate fine-grained information from unstructured text, enhancing implicit relationship detection." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.877, + 0.482, + 0.907 + ], + "angle": 0, + "content": "Task Planning. Graph-based task planning in RAG systems enhances complex problem-solving by overcoming the" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.092, + 0.913, + 0.272 + ], + "angle": 0, + "content": "limitations of traditional linear workflows, which struggle with multi-step or multimodal reasoning. These approaches build dynamic knowledge graphs, like Mind Maps, to explicitly model logical dependencies and context. For instance, the Agentic Reasoning [92] transforms reasoning chains into graph structures for entity extraction, relation identification, and community clustering, enabling dynamic path tracking and optimized retrieval, excelling in tasks like doctoral-level GPQA [67]. Collaborative frameworks such as Co-STORM extend this to multi-agent scenarios, representing queries, tool calls, and knowledge integration as traversable graph nodes to support task decomposition and adaptive reasoning." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.274, + 0.932, + 0.438 + ], + "angle": 0, + "content": "Tool Usage and Management. Graph-enhanced approaches to tool management overcome limitations of traditional dependency modeling by effectively capturing complex relationships like parameter passing, functional collaboration, and resource management. 
Graph RAG-Tool Fusion [57] models tools as graph nodes within a dual-layer architecture of core system APIs and domain-specific tools, encoding direct and indirect dependencies as edges. It uses a two-stage retrieval process: vector-based tool retrieval followed by a graph-based depth-first search to assemble dependency-compliant toolsets." + }, + { + "type": "title", + "bbox": [ + 0.518, + 0.451, + 0.756, + 0.464 + ], + "angle": 0, + "content": "9.2 Multi-Model Collaboration" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.469, + 0.935, + 0.756 + ], + "angle": 0, + "content": "Multi-model collaboration has emerged as a pivotal strategy for enhancing task complexity handling and domain adaptability in RAG systems [13]. By integrating the strengths of different models, this approach achieves optimized performance. For example, the CR-Planner [52] combines general-purpose generation models (e.g., GPT-4) with domain-specific critic models (e.g., Llama-3-8B). This hybrid system dynamically orchestrates subgoal planning and execution evaluation, utilizing MCTS to generate high-quality training data. Similarly, UAR [14] employs intent-aware and knowledgerequirement classifiers to dynamically trigger retrieval, decoupling lightweight classification tasks from resource-intensive decoding operations of LLMs. Furthermore, Adaptive-RAG [41] deploys small-complexity classifiers to route queries into different levels of processing strategies, balancing response speed for simple queries with deep reasoning for complex ones. These strategies form a closed \"generation-evaluation\"loop, leveraging complementary strengths across models to achieve improved accuracy and computational efficiency." 
+ }, + { + "type": "title", + "bbox": [ + 0.518, + 0.768, + 0.756, + 0.781 + ], + "angle": 0, + "content": "9.3 Multi-Modal Collaboration" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.786, + 0.913, + 0.907 + ], + "angle": 0, + "content": "The breakthrough in Chain-of-Thought (CoT) capabilities of language models has catalyzed the transition of multimodal reasoning from perceptual-level integration to cognitive-level reasoning, promoting Multimodal Collaborative Reasoning as a key trend [4] By deeply integrating the logical reasoning capabilities of language models with the spatial-semantic representation of multimodal data, it significantly enhances information synthesis in complex scenarios [2]." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.381, + 0.072 + ], + "angle": 0, + "content": "Synergizing RAG and Reasoning: A Systematic Review" + }, + { + "type": "header", + "bbox": [ + 0.653, + 0.06, + 0.912, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.092, + 0.482, + 0.198 + ], + "angle": 0, + "content": "For instance, in the medical domain, multimodal RAG systems such as MedCoT [56] utilize hierarchical expert systems to integrate CT imaging and pathology reports, enabling knowledge graph validation of diagnostic hypotheses and reducing misdiagnosis risks. Future research will likely focus on robust cross-modal knowledge alignment, progressive knowledge distillation, and adaptive reasoning frameworks." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.223, + 0.4, + 0.238 + ], + "angle": 0, + "content": "9.4 Customized Reinforcement Learning" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.241, + 0.483, + 0.438 + ], + "angle": 0, + "content": "The application of reinforcement learning (RL) in RAG systems has become instrumental in improving module coordination and enhancing overall efficiency. 
Recent studies focus on designing reward mechanisms tailored to the specific needs of RAG systems. Frameworks such as RAG-Gym [96] and DeepRAG [24] model reasoning processes using Markov Decision Processes and introduce fine-grained process supervision mechanisms. Additionally, ReARTeR [49] and SmartRAG [20] incorporate trust-aware reward strategies and end-to-end policy optimization to achieve superior accuracy and robustness. Opportunities remain for further exploring automated reward modeling with LLMs to facilitate fine-grained supervision." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.465, + 0.229, + 0.479 + ], + "angle": 0, + "content": "10 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.484, + 0.483, + 0.575 + ], + "angle": 0, + "content": "This paper has systematically reviewed the synergistic integration of Retrieval-Augmented Generation (RAG) and reasoning, providing a formal definition of reasoning within the RAG framework as a structured, multi-step, goal-driven process that dynamically combines parametric and retrieved knowledge to address complex problems." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.575, + 0.486, + 0.665 + ], + "angle": 0, + "content": "We presented a comprehensive taxonomy covering the purposes, collaboration paradigms, and implementation methods underlying RAG+Reasoning systems. The synergy enables more precise retrieval informed by logical analysis and enhances reasoning with contextually relevant, up-to-date knowledge beyond parametric limitations." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.666, + 0.483, + 0.786 + ], + "angle": 0, + "content": "While the enhanced reasoning capabilities allow tackling complex knowledge-intensive tasks such as deep research, expert-level problem solving, and domain-specific decision support, practical challenges remain. 
These include computational and token costs that grow non-linearly, risks of overthinking leading to inefficiency and error propagation, and the lack of evaluation frameworks that effectively assess intermediate reasoning quality alongside final results." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.786, + 0.483, + 0.877 + ], + "angle": 0, + "content": "To bridge the gap from theory to real-world application, we proposed practical design guidelines tailored to diverse domains like finance, healthcare, law, and personal assistants, emphasizing adaptability to heterogeneous, dynamic knowledge sources and strict requirements for output reliability and traceability." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.877, + 0.483, + 0.908 + ], + "angle": 0, + "content": "Finally, we identified promising directions for future research, including graph-structured knowledge integration," + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.092, + 0.914, + 0.137 + ], + "angle": 0, + "content": "multimodal and multi-model collaborative reasoning architectures, and advanced reinforcement learning techniques for optimizing retrieval-reasoning workflows." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.137, + 0.915, + 0.213 + ], + "angle": 0, + "content": "Overall, this work establishes both a theoretical foundation and practical roadmap to drive the development of next-generation RAG+Reasoning systems capable of robust, transparent, and efficient cognition, paving the way for impactful applications across academia and industry." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.232, + 0.618, + 0.247 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.529, + 0.251, + 0.914, + 0.302 + ], + "angle": 0, + "content": "[1] Abdelrahman Abdallah, Bhawna Piryani, Jamshid Mozafari, Mohammed Ali, and Adam Jatowt. 2025. Rankify: A comprehensive python toolkit for retrieval, re-ranking, and retrieval-augmented generation. 
arXiv preprint arXiv:2502.02464 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.529, + 0.302, + 0.915, + 0.377 + ], + "angle": 0, + "content": "[2] Mohammad Mahdi Abootorabi, Amirhosein Zobeiri, Mahdi Dehghani, Mohammadali Mohammadkhani, Bardia Mohammadi, Omid Ghahroodi, Mahdieh Soleymani Baghshah, and Ehsaneddin Asgari. 2025. Ask in Any Modality: A Comprehensive Survey on Multimodal Retrieval-Augmented Generation. arXiv preprint arXiv:2502.08826 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.529, + 0.379, + 0.915, + 0.429 + ], + "angle": 0, + "content": "[3] Akari Asai, Zeqiu Wu, Yizhong Wang, Avirup Sil, and Hannaneh Hajishirzi. 2023. Self-rag: Learning to retrieve, generate, and critique through self-reflection. In The Twelfth International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.531, + 0.429, + 0.915, + 0.479 + ], + "angle": 0, + "content": "[4] Jing Bi, Susan Liang, Xiaofei Zhou, Pinxin Liu, Junjia Guo, Yunlong Tang, Luchuan Song, Chao Huang, Guangyu Sun, Jinxi He, et al. 2025. Why Reasoning Matters? A Survey of Advancements in Multimodal Reasoning (v1). arXiv preprint arXiv:2504.03151 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.531, + 0.479, + 0.915, + 0.517 + ], + "angle": 0, + "content": "[5] Yuxi Bi, Yunfan Gao, and Haofen Wang. 2025. StePO-Rec: Towards Personalized Outfit Styling Assistant via Knowledge-Guided Multi-Step Reasoning. arXiv preprint arXiv:2504.09915 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.531, + 0.517, + 0.914, + 0.568 + ], + "angle": 0, + "content": "[6] Mingyang Chen, Tianpeng Li, Haoze Sun, Yijie Zhou, Chenzheng Zhu, Fan Yang, Zenan Zhou, Weipeng Chen, Haofen Wang, Jeff Z Pan, et al. 2025. Learning to Reason with Search for LLMs via Reinforcement Learning. arXiv preprint arXiv:2503.19470 (2025)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.531, + 0.567, + 0.914, + 0.617 + ], + "angle": 0, + "content": "[7] Peter Baile Chen, Yi Zhang, Michael Cafarella, and Dan Roth. 2025. Can we Retrieve Everything All at Once? ARM: An Alignment-Oriented LLM-based Retrieval Method. arXiv preprint arXiv:2501.18539 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.531, + 0.617, + 0.915, + 0.68 + ], + "angle": 0, + "content": "[8] Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiannan Guan, Peng Wang, Mengkang Hu, Yuhang Zhou, Te Gao, and Wangxiang Che. 2025. Towards reasoning era: A survey of long chain-of-thought for reasoning large language models. arXiv preprint arXiv:2503.09567 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.531, + 0.68, + 0.915, + 0.731 + ], + "angle": 0, + "content": "[9] Wenhu Chen, Ming Yin, Max Ku, Pan Lu, Yixin Wan, Xueguang Ma, Jianyu Xu, Xinyi Wang, and Tony Xia. 2023. Theoremqa: A theorem-driven question answering dataset. arXiv preprint arXiv:2305.12524 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.731, + 0.914, + 0.781 + ], + "angle": 0, + "content": "[10] Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. 2024. Do not think that much for \\(2 + 3 = ?\\) on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.781, + 0.914, + 0.819 + ], + "angle": 0, + "content": "[11] Yixiang Chen, Penglei Sun, Xiang Li, and Xiaowen Chu. 2025. MRD-RAG: Enhancing Medical Diagnosis with Multi-Round Retrieval-Augmented Generation. arXiv preprint arXiv:2504.07724 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.819, + 0.914, + 0.869 + ], + "angle": 0, + "content": "[12] Yiqun Chen, Lingyong Yan, Weiwei Sun, Xinyu Ma, Yi Zhang, Shuaiqiang Wang, Dawei Yin, Yiming Yang, and Jiaxin Mao. 2025. 
Improving Retrieval-Augmented Generation through Multi-Agent Reinforcement Learning. arXiv preprint arXiv:2501.15228 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.869, + 0.914, + 0.907 + ], + "angle": 0, + "content": "[13] Zhijun Chen, Jingzheng Li, Pengpeng Chen, Zhuoran Li, Kai Sun, Yuankai Luo, Qianren Mao, Dingqi Yang, Hailong Sun, and Philip S Yu. 2025. Harnessing Multiple Large Language Models: A Survey on" + }, + { + "type": "list", + "bbox": [ + 0.524, + 0.251, + 0.915, + 0.907 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.086, + 0.06, + 0.347, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.858, + 0.06, + 0.912, + 0.071 + ], + "angle": 0, + "content": "Gao et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.12, + 0.094, + 0.411, + 0.106 + ], + "angle": 0, + "content": "LLM Ensemble. arXiv preprint arXiv:2502.18036 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.107, + 0.484, + 0.156 + ], + "angle": 0, + "content": "[14] Qinyuan Cheng, Xiaonan Li, Shimin Li, Qin Zhu, Zhangyue Yin, Yunfan Shao, Linyang Li, Tianxiang Sun, Hang Yan, and Xipeng Qiu. 2024. Unified active retrieval for retrieval augmented generation. arXiv preprint arXiv:2406.12534 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.157, + 0.484, + 0.219 + ], + "angle": 0, + "content": "[15] Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, et al. 2025. The Danger of Overthinking: Examining the Reasoning-Action Dilemma in Agentic Tasks. arXiv preprint arXiv:2502.08235 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.22, + 0.482, + 0.257 + ], + "angle": 0, + "content": "[16] Alan Dao and Thinh Le. 2025. ReZero: Enhancing LLM search ability by trying one-more-time. 
arXiv:2504.11001 [cs.CL] https://arxiv.org/abs/2504.11001" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.258, + 0.483, + 0.295 + ], + "angle": 0, + "content": "[17] Tim Dettmers, Artidoro Pagnoni, Ari Holtzman, and Luke Zettlemoyer. 2023. Qlora: Efficient finetuning of quantized llms. Advances in neural information processing systems 36 (2023), 10088-10115." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.296, + 0.483, + 0.357 + ], + "angle": 0, + "content": "[18] Darren Edge, Ha Trinh, Newman Cheng, Joshua Bradley, Alex Chao, Apurva Mody, Steven Truitt, Dasha Metropolitansky, Robert Oazuwa Ness, and Jonathan Larson. 2024. From local to global: A graph rag approach to query-focused summarization. arXiv preprint arXiv:2404.16130 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.359, + 0.482, + 0.396 + ], + "angle": 0, + "content": "[19] Chenrui Fan, Ming Li, Lichao Sun, and Tianyi Zhou. 2025. Missing Premise exacerbates Overthinking: Are Reasoning Models losing Critical Thinking Skill? arXiv preprint arXiv:2504.06514 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.397, + 0.482, + 0.433 + ], + "angle": 0, + "content": "[20] Jingsheng Gao, Linxu Li, Weiyuan Li, Yuzhuo Fu, and Bin Dai. 2024. SmartRAG: Jointly Learn RAG-Related Tasks From the Environment Feedback. arXiv preprint arXiv:2410.18141 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.434, + 0.482, + 0.483 + ], + "angle": 0, + "content": "[21] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, and Haofen Wang. 2023. Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.485, + 0.482, + 0.522 + ], + "angle": 0, + "content": "[22] Yunfan Gao, Yun Xiong, Meng Wang, and Haofen Wang. 2024. Modular rag: Transforming rag systems into lego-like reconfigurable frameworks. 
arXiv preprint arXiv:2407.21059 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.523, + 0.482, + 0.572 + ], + "angle": 0, + "content": "[23] Zengyi Gao, Yukun Cao, Hairu Wang, Ao Ke, Yuan Feng, Xike Xie, and S Kevin Zhou. 2025. FRAG: A Flexible Modular Framework for Retrieval-Augmented Generation based on Knowledge Graphs. arXiv preprint arXiv:2501.09957 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.573, + 0.482, + 0.622 + ], + "angle": 0, + "content": "[24] Xinyan Guan, Jiali Zeng, Fandong Meng, Chunlei Xin, Yaojie Lu, Hongyu Lin, Xianpei Han, Le Sun, and Jie Zhou. 2025. DeepRAG: Thinking to Retrieve Step by Step for Large Language Models. arXiv preprint arXiv:2502.01142 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.623, + 0.482, + 0.672 + ], + "angle": 0, + "content": "[25] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.673, + 0.482, + 0.722 + ], + "angle": 0, + "content": "[26] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.723, + 0.482, + 0.748 + ], + "angle": 0, + "content": "[27] Zirui Guo, Lianghao Xia, Yanhua Yu, Tu Ao, and Chao Huang. 2024. Lighthrag: Simple and fast retrieval-augmented generation. (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.749, + 0.482, + 0.786 + ], + "angle": 0, + "content": "[28] Bernal Jiménez Gutiérrez, Yiheng Shu, Weijian Qi, Sizhe Zhou, and Yu Su. 2025. From RAG to Memory: Non-Parametric Continual Learning for Large Language Models. 
arXiv preprint arXiv:2502.14802 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.787, + 0.482, + 0.848 + ], + "angle": 0, + "content": "[29] Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, et al. 2024. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems. arXiv preprint arXiv:2402.14008 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.849, + 0.482, + 0.899 + ], + "angle": 0, + "content": "[30] Yancheng He, Shilong Li, Jiaheng Liu, Weixun Wang, Xingyuan Bu, Ge Zhang, Zhongyuan Peng, Zhaoxiang Zhang, Zhicheng Zheng, Wenbo Su, et al. 2025. Can Large Language Models Detect Errors in Long Chain-of-Thought Reasoning? arXiv preprint arXiv:2502.19361" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.094, + 0.484, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.55, + 0.095, + 0.588, + 0.106 + ], + "angle": 0, + "content": "(2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.525, + 0.107, + 0.912, + 0.144 + ], + "angle": 0, + "content": "[31] Xanh Ho, Anh-Khoa Duong Nguyen, Saku Sugawara, and Akiko Aizawa. 2020. Constructing a multi-hop qa dataset for comprehensive evaluation of reasoning steps. arXiv preprint arXiv:2011.01060 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.145, + 0.912, + 0.181 + ], + "angle": 0, + "content": "[32] Yubin Hong, Chaofan Li, Jingyi Zhang, and Yingxia Shao. 2025. FG-RAG: Enhancing Query-Focused Summarization with Context-Aware Fine-Grained Graph RAG. arXiv preprint arXiv:2504.07103 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.182, + 0.914, + 0.244 + ], + "angle": 0, + "content": "[33] SU Hongjin, Howard Yen, Mengzhou Xia, Weijia Shi, Niklas Muennighoff, Han-yu Wang, Liu Haisu, Quan Shi, Zachary S Siegel, Michael Tang, et al. 2024. 
BRIGHT: A Realistic and Challenging Benchmark for Reasoning-Intensive Retrieval. In The Thirteenth International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.245, + 0.912, + 0.282 + ], + "angle": 0, + "content": "[34] Sheryl Hsu, Omar Khattab, Chelsea Finn, and Archit Sharma. 2024. Grounding by trying: Llms with reinforcement learning-enhanced retrieval. arXiv preprint arXiv:2410.23214 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.283, + 0.912, + 0.32 + ], + "angle": 0, + "content": "[35] Jian Hu. 2025. REINFORCE++: A Simple and Efficient Approach for Aligning Large Language Models. arXiv preprint arXiv:2501.03262 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.321, + 0.912, + 0.357 + ], + "angle": 0, + "content": "[36] Yunhai Hu, Yilun Zhao, Chen Zhao, and Arman Cohan. 2025. MCTS-RAG: Enhancing Retrieval-Augmented Generation with Monte Carlo Tree Search. arXiv preprint arXiv:2503.20757 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.359, + 0.914, + 0.408 + ], + "angle": 0, + "content": "[37] Fantine Huot, Reinald Kim Amplayo, Jennimaria Palomaki, Alice Shoshana Jakobovits, Elizabeth Clark, and Mirella Lapata. 2024. Agents' Room: Narrative Generation through Multi-step Collaboration. arXiv preprint arXiv:2410.02603 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.409, + 0.912, + 0.459 + ], + "angle": 0, + "content": "[38] Shayekh Bin Islam, Md Asib Rahman, KSM Hossain, Enamul Hoque, Shafiq Joty, and Md Rizwan Parvez. 2024. Open-rag: Enhanced retrieval-augmented reasoning with open-source large language models. arXiv preprint arXiv:2410.01782 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.459, + 0.912, + 0.508 + ], + "angle": 0, + "content": "[39] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. 2024. Openai o1 system card. 
arXiv preprint arXiv:2412.16720 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.509, + 0.912, + 0.571 + ], + "angle": 0, + "content": "[40] Naman Jain, King Han, Alex Gu, Wen-Ding Li, Fanjia Yan, Tianjun Zhang, Sida Wang, Armando Solar-Lezama, Koushik Sen, and Ion Stoica. 2024. Livecodebench: Holistic and contamination free evaluation of large language models for code. arXiv preprint arXiv:2403.07974 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.572, + 0.912, + 0.622 + ], + "angle": 0, + "content": "[41] Soyeong Jeong, Jinheon Baek, Sukmin Cho, Sung Ju Hwang, and Jong C Park. 2024. Adaptive-rag: Learning to adapt retrieval-augmented large language models through question complexity arXiv preprint arXiv:2403.14403 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.623, + 0.912, + 0.659 + ], + "angle": 0, + "content": "[42] Pengcheng Jiang. 2025. DeepRetrieval: Powerful Query Generation for Information Retrieval with Reinforcement Learning. arXiv preprint arXiv:2503.00223 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.66, + 0.912, + 0.71 + ], + "angle": 0, + "content": "[43] Yucheng Jiang, Yijia Shao, Dekun Ma, Sina J Semnani, and Monica S Lam. 2024. Into the unknown unknowns: Engaged human learning through participation in language model agent conversations. arXiv preprint arXiv:2408.15232 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.711, + 0.912, + 0.76 + ], + "angle": 0, + "content": "[44] Yucheng Jiang, Yijia Shao, Dekun Ma, Sina J Semnani, and Monica S Lam. 2024. Into the unknown unknowns: Engaged human learning through participation in language model agent conversations. arXiv preprint arXiv:2408.15232 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.761, + 0.912, + 0.822 + ], + "angle": 0, + "content": "[45] Zhengbao Jiang, Frank F Xu, Luyu Gao, Zhiqing Sun, Qian Liu, Jane Dwivedi-Yu, Yiming Yang, Jamie Callan, and Graham Neubig. 
2023 Active retrieval augmented generation. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing 7969-7992." + }, + { + "type": "ref_text", + "bbox": [ + 0.524, + 0.823, + 0.914, + 0.886 + ], + "angle": 0, + "content": "[46] Ashutosh Joshi, Sheikh Muhammad Sarwar, Samarth Varshney, Sreyashi Nag, Shrivats Agrawal, and Juhi Naik. 2024. REAPER: Reasoning based retrieval planning for complex RAG systems. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 4621-4628." + }, + { + "type": "list", + "bbox": [ + 0.524, + 0.095, + 0.914, + 0.886 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.381, + 0.072 + ], + "angle": 0, + "content": "Synergizing RAG and Reasoning: A Systematic Review" + }, + { + "type": "header", + "bbox": [ + 0.653, + 0.06, + 0.913, + 0.072 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.093, + 0.484, + 0.156 + ], + "angle": 0, + "content": "[47] Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, et al. 2019. Natural questions: a benchmark for question answering research. Transactions of the Association for Computational Linguistics 7 (2019), 453-466." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.157, + 0.484, + 0.232 + ], + "angle": 0, + "content": "[48] Myeonghwa Lee, Seonho An, and Min-Soo Kim. 2024. PlanRAG: A plan-then-retrieval augmented generation for generative large language models as decision makers. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers). 6537–6555." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.233, + 0.484, + 0.294 + ], + "angle": 0, + "content": "[49] Zhicheng Lee, Shulin Cao, Jinxin Liu, Jiajie Zhang, Weichuan Liu, Xiaoyin Che, Lei Hou, and Juanzi Li. 2025. ReaRAG: Knowledge-guided Reasoning Enhances Factuality of Large Reasoning Models with Iterative Retrieval Augmented Generation. arXiv preprint arXiv:2503.21729 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.295, + 0.483, + 0.333 + ], + "angle": 0, + "content": "[50] Jinzheng Li, Jingshu Zhang, Hongguang Li, and Yiqing Shen. 2024. An Agent Framework for Real-Time Financial Information Searching with Large Language Models. arXiv preprint arXiv:2502.15684 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.334, + 0.483, + 0.383 + ], + "angle": 0, + "content": "[51] Xiaoxi Li, Guanting Dong, Jiajie Jin, Yuyao Zhang, Yujia Zhou, Yutao Zhu, Peitian Zhang, and Zhicheng Dou. 2025. Search-01: Agentic search-enhanced large reasoning models. arXiv preprint arXiv:2501.05366 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.383, + 0.483, + 0.434 + ], + "angle": 0, + "content": "[52] Xingxuan Li, Weiwen Xu, Ruochen Zhao, Fangkai Jiao, Shafiq Joty, and Lidong Bing. 2024. Can We Further Elicit Reasoning in LLMs? Critic-Guided Planning with Retrieval-Augmentation for Solving Challenging Tasks. arXiv preprint arXiv:2410.01428 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.435, + 0.483, + 0.485 + ], + "angle": 0, + "content": "[53] Xingxuan Li, Weiwen Xu, Ruochen Zhao, Fangkai Jiao, Shafiq Joty, and Lidong Bing. 2024. Can We Further Elicit Reasoning in LLMs? Critic-Guided Planning with Retrieval-Augmentation for Solving Challenging Tasks. arXiv preprint arXiv:2410.01428 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.485, + 0.484, + 0.547 + ], + "angle": 0, + "content": "[54] Zhuoqun Li, Haiyang Yu, Xuanang Chen, Hongyu Lin, Yaojie Lu, Fei Huang, Xianpei Han, Yongbin Li, and Le Sun. 
2025. Deepsolution: Boosting complex engineering solution design via tree-based exploration and bi-point thinking. arXiv preprint arXiv:2502.20730 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.547, + 0.483, + 0.597 + ], + "angle": 0, + "content": "[55] Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. 2023. Let's verify step by step. In *The Twelfth International Conference on Learning Representations*." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.598, + 0.483, + 0.635 + ], + "angle": 0, + "content": "[56] Jiaxiang Liu, Yuan Wang, Jiawei Du, Joey Tianyi Zhou, and Zuozhu Liu. 2024. Medcot: Medical chain of thought via hierarchical expert. arXiv preprint arXiv:2412.13736 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.636, + 0.483, + 0.672 + ], + "angle": 0, + "content": "[57] Elias Lumer, Pradeep Honaganahalli Basavaraju, Myles Mason, James A Burke, and Vamse Kumar Subbiah. 2025. Graph RAG-Tool Fusion. arXiv preprint arXiv:2502.07223 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.673, + 0.483, + 0.723 + ], + "angle": 0, + "content": "[58] Haoran Luo, Yikai Guo, Qika Lin, Xiaobao Wu, Xinyu Mu, Wenhao Liu, Meina Song, Yifan Zhu, Luu Anh Tuan, et al. 2025. KBQA-o1: Agentic Knowledge Base Question Answering with Monte Carlo Tree Search. arXiv preprint arXiv:2501.18922 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.723, + 0.483, + 0.785 + ], + "angle": 0, + "content": "[59] Yuanjie Lyu, Zhiyu Li, Simin Niu, Feiyu Xiong, Bo Tang, Wenjin Wang, Hao Wu, Huanyong Liu, Tong Xu, and Enhong Chen. 2025. Crud-rag: A comprehensive chinese benchmark for retrieval-augmented generation of large language models. ACM Transactions on Information Systems 43, 2 (2025), 1-32." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.786, + 0.483, + 0.848 + ], + "angle": 0, + "content": "[60] Shengjie Ma, Chengjin Xu, Xuhui Jiang, Muzhi Li, Huaren Qu, Cehao Yang, Jiaxin Mao, and Jian Guo. 2024. Think-on-Graph 2.0: Deep and Faithful Large Language Model Reasoning with Knowledge-guided Retrieval Augmented Generation. arXiv preprint arXiv:2407.10805 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.849, + 0.483, + 0.899 + ], + "angle": 0, + "content": "[61] Xinbei Ma, Yeyun Gong, Pengcheng He, Hai Zhao, and Nan Duan. 2023. Query rewriting in retrieval-augmented large language models. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. 5303-5315." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.093, + 0.484, + 0.899 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.093, + 0.915, + 0.142 + ], + "angle": 0, + "content": "[62] Grégoire Mialon, Clémentine Fourrier, Thomas Wolf, Yann LeCun, and Thomas Scialom. 2023. Gaia: a benchmark for general ai assistants. In The Twelfth International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.145, + 0.915, + 0.193 + ], + "angle": 0, + "content": "[63] Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. 2025. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.195, + 0.915, + 0.243 + ], + "angle": 0, + "content": "[64] Shishir G Patil, Tianjun Zhang, Xin Wang, and Joseph E Gonzalez. 2024. Gorilla: Large language model connected with massive apis. Advances in Neural Information Processing Systems 37 (2024), 126544-126565." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.245, + 0.915, + 0.295 + ], + "angle": 0, + "content": "[65] Fabio Petroni, Aleksandra Piktus, Angela Fan, Patrick Lewis, Majid Yazdani, Nicola De Cao, James Thorne, Yacine Jernite, Vladimir Karpukhin, Jean Maillard, et al. 2020. KILT: a benchmark for knowledge intensive language tasks. arXiv preprint arXiv:2009.02252 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.296, + 0.913, + 0.332 + ], + "angle": 0, + "content": "[66] Pouya Pezeshkpour and Estevam Hruschka. 2025. Insight-RAG: Enhancing LLMs with Insight-Driven Augmentation. arXiv preprint arXiv:2504.00187 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.334, + 0.913, + 0.383 + ], + "angle": 0, + "content": "[67] David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R Bowman. 2024. Gpqa: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.383, + 0.913, + 0.433 + ], + "angle": 0, + "content": "[68] Zhihong Shao, Yeyun Gong, Yelong Shen, Minlie Huang, Nan Duan, and Weizhu Chen. 2023. Enhancing retrieval-augmented large language models with iterative retrieval-generation synergy. arXiv preprint arXiv:2305.15294 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.434, + 0.913, + 0.484 + ], + "angle": 0, + "content": "[69] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.485, + 0.913, + 0.521 + ], + "angle": 0, + "content": "[70] Quan Shi, Michael Tang, Karthik Narasimhan, and Shunyu Yao. 2024. Can Language Models Solve Olympiad Programming? arXiv preprint arXiv:2404.10952 (2024)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.522, + 0.913, + 0.559 + ], + "angle": 0, + "content": "[71] Quan Shi, Michael Tang, Karthik Narasimhan, and Shunyu Yao. 2024. Can Language Models Solve Olympiad Programming? arXiv preprint arXiv:2404.10952 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.56, + 0.913, + 0.609 + ], + "angle": 0, + "content": "[72] Huatong Song, Jinhao Jiang, Yingqian Min, Jie Chen, Zhipeng Chen, Wayne Xin Zhao, Lei Fang, and Ji-Rong Wen. 2025. R1-Searcher: Incentivizing the Search Capability in LLMs via Reinforcement Learning. arXiv preprint arXiv:2503.05592 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.61, + 0.913, + 0.659 + ], + "angle": 0, + "content": "[73] Sakhinana Sagar Srinivas and Venkataramana Runkana. 2025. Scaling Test-Time Inference with Policy-Optimized, Dynamic Retrieval-Augmented Generation via KV Caching and Decoding. arXiv preprint arXiv:2504.01281 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.66, + 0.913, + 0.711 + ], + "angle": 0, + "content": "[74] Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Hanjie Chen, Xia Hu, et al. 2025. Stop overthinking: A survey on efficient reasoning for large language models. arXiv preprint arXiv:2503.16419 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.711, + 0.913, + 0.761 + ], + "angle": 0, + "content": "[75] Zhongxiang Sun, Qipeng Wang, Weijie Yu, Xiaoxue Zang, Kai Zheng, Jun Xu, Xiao Zhang, Song Yang, and Han Li. 2025. ReARTeR: Retrieval-Augmented Reasoning with Trustworthy Process Rewarding. arXiv preprint arXiv:2501.07861 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.761, + 0.913, + 0.798 + ], + "angle": 0, + "content": "[76] Alon Talmor and Jonathan Berant. 2018. The web as a knowledge-base for answering complex questions. arXiv preprint arXiv:1803.06643 (2018)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.799, + 0.913, + 0.836 + ], + "angle": 0, + "content": "[77] Hieu Tran, Zonghai Yao, Junda Wang, Yifan Zhang, Zhichao Yang, and Hong Yu. 2024. RARE: Retrieval-Augmented Reasoning Enhancement for Large Language Models. arXiv preprint arXiv:2412.02830 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.837, + 0.913, + 0.885 + ], + "angle": 0, + "content": "[78] Harsh Trivedi, Niranjan Balasubramanian, Tushar Khot, and Ashish Sabharwal. 2022. Interleaving retrieval with chain-of-thought reasoning for knowledge-intensive multi-step questions. arXiv preprint arXiv:2212.10509 (2022)." + }, + { + "type": "list", + "bbox": [ + 0.523, + 0.093, + 0.915, + 0.885 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.086, + 0.06, + 0.347, + 0.073 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.858, + 0.06, + 0.912, + 0.072 + ], + "angle": 0, + "content": "Gao et al." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.093, + 0.484, + 0.143 + ], + "angle": 0, + "content": "[79] Harsh Trivedi, Niranjan Balasubramanian, Tushar Khot, and Ashish Sabharwal. 2022. MuSiQue: Multihop Questions via Single-hop Question Composition. Transactions of the Association for Computational Linguistics 10 (2022), 539-554." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.144, + 0.484, + 0.194 + ], + "angle": 0, + "content": "[80] Tu Vu, Mohit Iyyer, Xuezhi Wang, Noah Constant, Jerry Wei, Jason Wei, Chris Tar, Yun-Hsuan Sung, Denny Zhou, Quoc Le, et al. 2023. Freshllms: Refreshing large language models with search engine augmentation. arXiv preprint arXiv:2310.03214 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.195, + 0.482, + 0.245 + ], + "angle": 0, + "content": "[81] Ante Wang, Linfeng Song, Ye Tian, Dian Yu, Haitao Mi, Xiangyu Duan, Zhaopeng Tu, Jinsong Su, and Dong Yu. 2025. 
Don't Get Lost in the Trees: Streamlining LLM Reasoning by Overcoming Tree Search Exploration Pitfalls. arXiv preprint arXiv:2502.11183 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.245, + 0.483, + 0.282 + ], + "angle": 0, + "content": "[82] Jinyu Wang, Jingjing Fu, Rui Wang, Lei Song, and Jiang Bian. 2025. PIKE-RAG: sPecialized Knowledge and Rationale Augmented Generation. arXiv preprint arXiv:2501.11551 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.283, + 0.483, + 0.32 + ], + "angle": 0, + "content": "[83] Liang Wang, Haonan Chen, Nan Yang, Xiaolong Huang, Zhicheng Dou, and Furu Wei. 2025. Chain-of-Retrieval Augmented Generation. arXiv preprint arXiv:2501.14342 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.321, + 0.483, + 0.382 + ], + "angle": 0, + "content": "[84] Ruobing Wang, Daren Zha, Shi Yu, Qingfei Zhao, Yuxuan Chen, Yixuan Wang, Shuo Wang, Yukun Yan, Zhenghao Liu, Xu Han, et al. 2024. Retriever-and-Memory: Towards Adaptive Note-Enhanced Retrieval-Augmented Generation. arXiv preprint arXiv:2410.08821 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.383, + 0.483, + 0.433 + ], + "angle": 0, + "content": "[85] Siqi Wang, Chao Liang, Yunfan Gao, Yang Liu, Jing Li, and Haofen Wang. 2024. Decoding Urban Industrial Complexity: Enhancing Knowledge-Driven Insights via IndustryScopeGPT. In Proceedings of the 32nd ACM International Conference on Multimedia. 4757-4765." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.434, + 0.483, + 0.484 + ], + "angle": 0, + "content": "[86] Shuting Wang, Jiongnan Liu, Shiren Song, Jiehan Cheng, Yuqi Fu, Peidong Guo, Kun Fang, Yutao Zhu, and Zhicheng Dou. 2024. Domainrag: A chinese benchmark for evaluating domain-specific retrieval-augmented generation. arXiv preprint arXiv:2406.05654 (2024)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.485, + 0.483, + 0.534 + ], + "angle": 0, + "content": "[87] Xidong Wang, Guiming Hardy Chen, Dingjie Song, Zhiyi Zhang, Zhihong Chen, Qingying Xiao, Feng Jiang, Jianquan Li, Xiang Wan, Benyou Wang, et al. 2023. Cmb: A comprehensive medical benchmark in chinese. arXiv preprint arXiv:2308.08833 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.535, + 0.483, + 0.584 + ], + "angle": 0, + "content": "[88] Xiaohua Wang, Zhenghua Wang, Xuan Gao, Feiran Zhang, Yixin Wu, Zhibo Xu, Tianyuan Shi, Zhengyuan Wang, Shizheng Li, Qi Qian, et al. 2024. Searching for best practices in retrieval-augmented generation. arXiv preprint arXiv:2407.01219 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.585, + 0.483, + 0.634 + ], + "angle": 0, + "content": "[89] Zheng Wang, Zhongyang Li, Zeren Jiang, Dandan Tu, and Wei Shi. 2024. Crafting Personalized Agents through Retrieval-Augmented Generation on Editable Memory Graphs. arXiv preprint arXiv:2409.19401 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.635, + 0.483, + 0.685 + ], + "angle": 0, + "content": "[90] Zhengren Wang, Jiayang Yu, Dongsheng Ma, Zhe Chen, Yu Wang, Zhiyu Li, Feiyu Xiong, Yanfeng Wang, Linpeng Tang, Wentao Zhang, et al. 2025. RARE: Retrieval-Augmented Reasoning Modeling. arXiv preprint arXiv:2503.23513 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.686, + 0.483, + 0.735 + ], + "angle": 0, + "content": "[91] Yixuan Weng, Minjun Zhu, Guangsheng Bao, Hongbo Zhang, Jindong Wang, Yue Zhang, and Linyi Yang. 2024. Cyclereresearcher: Improving automated research via automated review. arXiv preprint arXiv:2411.00816 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.736, + 0.483, + 0.772 + ], + "angle": 0, + "content": "[92] Junde Wu, Jiayuan Zhu, and Yuyuan Liu. 2025. Agentic Reasoning: Reasoning LLMs with Tools for the Deep Research. arXiv preprint arXiv:2502.04644 (2025)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.773, + 0.483, + 0.823 + ], + "angle": 0, + "content": "[93] Wenjie Wu, Yongcheng Jing, Yingjie Wang, Wenbin Hu, and Dacheng Tao. 2025. Graph-augmented reasoning: Evolving step-by-step knowledge graph retrieval for llm reasoning. arXiv preprint arXiv:2503.01642 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.824, + 0.483, + 0.885 + ], + "angle": 0, + "content": "[94] Zekun Xi, Wenbiao Yin, Jizhan Fang, Jialong Wu, Runnan Fang, Ningyu Zhang, Jiang Yong, Pengjun Xie, Fei Huang, and Huajun Chen. 2025. OmniThink: Expanding Knowledge Boundaries in Machine Writing through Thinking. arXiv preprint arXiv:2501.09751 (2025)." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.093, + 0.484, + 0.885 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.093, + 0.913, + 0.143 + ], + "angle": 0, + "content": "[95] Liang Xiao, Wen Dai, Shuai Chen, Bin Qin, Chongyang Shi, Haopeng Jing, and Tianyu Guo. 2025. Retrieval-Augmented Generation by Evidence Retroactivity in LLMs. arXiv preprint arXiv:2501.05475 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.145, + 0.913, + 0.193 + ], + "angle": 0, + "content": "[96] Guangzhi Xiong, Qiao Jin, Xiao Wang, Yin Fang, Haolin Liu, Yifan Yang, Fangyuan Chen, Zhixing Song, Dengyu Wang, Minjia Zhang, et al. 2025. Rag-gym: Optimizing reasoning and search agents with process supervision. arXiv preprint arXiv:2502.13957 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.194, + 0.913, + 0.232 + ], + "angle": 0, + "content": "[97] Guanming Xiong, Haochen Li, and Wen Zhao. 2025. MCTS-KBQA: Monte Carlo Tree Search for Knowledge Base Question Answering. arXiv preprint arXiv:2502.13428 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.233, + 0.913, + 0.282 + ], + "angle": 0, + "content": "[98] Ruibin Xiong, Yimeng Chen, Dmitrii Khizbullin, and Jürgen Schmidhuber. 2025. 
Beyond Outlining: Heterogeneous Recursive Planning for Adaptive Long-form Writing with Language Models. arXiv preprint arXiv:2503.08275 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.283, + 0.913, + 0.344 + ], + "angle": 0, + "content": "[99] Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, et al. 2025. Towards Large Reasoning Models: A Survey of Reinforced Reasoning with Large Language Models. arXiv preprint arXiv:2501.09686 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.345, + 0.913, + 0.407 + ], + "angle": 0, + "content": "[100] Zhipeng Xu, Zhenghao Liu, Yukun Yan, Shuo Wang, Shi Yu, Zheni Zeng, Chaojun Xiao, Zhiyuan Liu, Ge Yu, and Chenyan Xiong. 2024. ActiveRAG: Autonomous Knowledge Assimilation and Accommodation through Retrieval-Augmented Agents. arXiv preprint arXiv:2402.13547 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.409, + 0.913, + 0.433 + ], + "angle": 0, + "content": "[101] Ruiran Yan, Zheng Liu, and Defu Lian. 2025. O1 embedder: Let retrievers think before action. arXiv preprint arXiv:2502.07555 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.434, + 0.913, + 0.483 + ], + "angle": 0, + "content": "[102] Xiaoming Zhang, Ming Wang, Xiaocui Yang, Daling Wang, Shi Feng, and Yifei Zhang. 2024. Hierarchical Retrieval-Augmented Generation Model with Rethink for Multi-hop Question Answering. arXiv preprint arXiv:2408.11875 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.484, + 0.913, + 0.533 + ], + "angle": 0, + "content": "[103] Zhuocheng Zhang, Yang Feng, and Min Zhang. 2025. LevelRAG: Enhancing Retrieval-Augmented Generation with Multi-hop Logic Planning over Rewriting Augmented Searchers. arXiv preprint arXiv:2502.18139 (2025)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.534, + 0.913, + 0.572 + ], + "angle": 0, + "content": "[104] Bowen Zhao, Zander Brumbaugh, Yizhong Wang, Hannaneh Hajishirzi, and Noah A Smith. 2024. Set the clock: Temporal alignment of pretrained language models. arXiv preprint arXiv:2402.16797 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.573, + 0.913, + 0.621 + ], + "angle": 0, + "content": "[105] Xuejiao Zhao, Siyan Liu, Su-Yin Yang, and Chunyan Miao. 2025. MedRAG: Enhancing Retrieval-augmented Generation with Knowledge Graph-Elicited Reasoning for Healthcare Copilot. arXiv preprint arXiv:2502.04413 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.622, + 0.913, + 0.672 + ], + "angle": 0, + "content": "[106] Yuxiang Zheng, Dayuan Fu, Xiangkun Hu, Xiaojie Cai, Lyumanshan Ye, Pengrui Lu, and Pengfei Liu. 2025. DeepResearcher: Scaling Deep Research via Reinforcement Learning in Real-world Environments. arXiv preprint arXiv:2504.03160 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.673, + 0.913, + 0.722 + ], + "angle": 0, + "content": "[107] Yijie Zhong, Feifan Wu, Mengying Guo, Xiaolian Zhang, Meng Wang, and Haofen Wang. 2025. Meta-PKE: Memory-Enhanced Task-Adaptive Personal Knowledge Extraction in Daily Life. Information Processing & Management 62, 4 (2025), 104097." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.723, + 0.913, + 0.76 + ], + "angle": 0, + "content": "[108] Yujia Zhou, Zheng Liu, Jiajie Jin, Jian-Yun Nie, and Zhicheng Dou. 2024. Metacognitive retrieval-augmented large language models. In Proceedings of the ACM Web Conference 2024. 1453-1463." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.761, + 0.913, + 0.81 + ], + "angle": 0, + "content": "[109] Jiachen Zhu, Congmin Zheng, Jianghao Lin, Kounianhua Du, Ying Wen, Yong Yu, Jun Wang, and Weinan Zhang. 2025. Retrieval-Augmented Process Reward Model for Generalizable Mathematical Reasoning. 
arXiv preprint arXiv:2502.14361 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.811, + 0.913, + 0.861 + ], + "angle": 0, + "content": "[110] Rongzhi Zhu, Xiangyu Liu, Zequn Sun, Yiwei Wang, and Wei Hu. 2025. Mitigating Lost-in-Retrieval Problems in Retrieval Augmented Multi-Hop Question Answering. arXiv preprint arXiv:2502.14245 (2025)." + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.093, + 0.913, + 0.861 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.381, + 0.073 + ], + "angle": 0, + "content": "Synergizing RAG and Reasoning: A Systematic Review" + }, + { + "type": "header", + "bbox": [ + 0.653, + 0.06, + 0.914, + 0.073 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.091, + 0.177, + 0.108 + ], + "angle": 0, + "content": "Appendix" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.111, + 0.38, + 0.126 + ], + "angle": 0, + "content": "Agentic RAG Symbol Reference System" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.129, + 0.482, + 0.176 + ], + "angle": 0, + "content": "The following table presents a complete symbol reference system with formally defined mathematical notations for all core concepts." 
+ }, + { + "type": "title", + "bbox": [ + 0.084, + 0.186, + 0.279, + 0.202 + ], + "angle": 0, + "content": "Symbol Design Hierarchy" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.205, + 0.411, + 0.22 + ], + "angle": 0, + "content": "- Base states/actions: Standard font \\((S_{t},a_{t})\\)" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.22, + 0.384, + 0.235 + ], + "angle": 0, + "content": "- Sets/spaces: Calligraphic font \\((\\mathcal{A},\\mathcal{K}_t)\\)" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.235, + 0.482, + 0.251 + ], + "angle": 0, + "content": "- Core mechanism functions: Uppercase Greek \\((\\Psi, \\Gamma)\\)" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.251, + 0.46, + 0.266 + ], + "angle": 0, + "content": "- Operational functions: Calligraphic font \\((\\mathcal{R},\\mathcal{T}_a)\\)" + }, + { + "type": "list", + "bbox": [ + 0.11, + 0.205, + 0.482, + 0.266 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.092, + 0.916, + 0.122 + ], + "angle": 0, + "content": "- Auxiliary functions: Lowercase Greek \\((\\delta, \\phi)\\) or blackboard bold (I)" + }, + { + "type": "title", + "bbox": [ + 0.516, + 0.134, + 0.691, + 0.147 + ], + "angle": 0, + "content": "Annotation Guidelines" + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.153, + 0.738, + 0.167 + ], + "angle": 0, + "content": "- Symbol disambiguation:" + }, + { + "type": "text", + "bbox": [ + 0.555, + 0.168, + 0.909, + 0.182 + ], + "angle": 0, + "content": "- \\(\\mathcal{R}\\) strictly denotes retrieval function (vs. reward \\(R\\))" + }, + { + "type": "text", + "bbox": [ + 0.555, + 0.183, + 0.912, + 0.213 + ], + "angle": 0, + "content": "- \\(\\delta\\) exclusively represents state transitions (vs. 
branch selector \\(\\psi\\))" + }, + { + "type": "list", + "bbox": [ + 0.555, + 0.168, + 0.912, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.214, + 0.714, + 0.228 + ], + "angle": 0, + "content": "- Dynamic extensions:" + }, + { + "type": "text", + "bbox": [ + 0.555, + 0.228, + 0.912, + 0.259 + ], + "angle": 0, + "content": "- Action space \\(\\mathcal{A}\\) and knowledge base \\(\\mathcal{K}_t\\) support incremental updates: \\(\\mathcal{K}_{t + 1} = \\mathcal{K}_t\\oplus \\mathrm{Retrieve}(q_t)\\)" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.06, + 0.347, + 0.073 + ], + "angle": 0, + "content": "Conference'17, July 2017, Washington, DC, USA" + }, + { + "type": "header", + "bbox": [ + 0.858, + 0.06, + 0.912, + 0.072 + ], + "angle": 0, + "content": "Gao et al." + }, + { + "type": "table_caption", + "bbox": [ + 0.345, + 0.09, + 0.652, + 0.105 + ], + "angle": 0, + "content": "Table 3. Basic states and system components" + }, + { + "type": "table", + "bbox": [ + 0.166, + 0.118, + 0.833, + 0.206 + ], + "angle": 0, + "content": "
SymbolTypeDefinition & Description
St=(Ht,Ct)Composite stateComplete system state at timestep t, containing historical information and context vectors
HtVector/SetHistorical information aggregation
CtVectorContextual embedding vectors
qtVectorVector representation of current query at step t
KtSetDynamic knowledge base ( Initialized as K0=∅)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.348, + 0.214, + 0.651, + 0.23 + ], + "angle": 0, + "content": "Table 4. Action space and policy definitions" + }, + { + "type": "table", + "bbox": [ + 0.167, + 0.244, + 0.832, + 0.306 + ], + "angle": 0, + "content": "
SymbolTypeDefinition & Description
ASetAction space, e.g., A = {Retrieve, Generate, Verify, Terminate}
atScalarSelected action at timestep t (at ∈ A)
π(St; Θ)FunctionPolicy function with parameters Θ, mapping states to action probability distributions (π: S → Δ(A))
" + }, + { + "type": "table_caption", + "bbox": [ + 0.371, + 0.314, + 0.625, + 0.327 + ], + "angle": 0, + "content": "Table 5. State transition mechanisms" + }, + { + "type": "table", + "bbox": [ + 0.165, + 0.343, + 0.83, + 0.433 + ], + "angle": 0, + "content": "
SymbolTypeDefinition & Description
δFunctionState transition function, update rule St+1 = δ(St, ·)
TaFunctionLow-level state transition operation for action a (e.g., TRetrieve denotes retrieval)
RFunctionRetrieval function, R(St) returns retrieval results
OperatorFunction composition operator (e.g., f∘g(x) = f(g(x)))
" + }, + { + "type": "table_caption", + "bbox": [ + 0.333, + 0.441, + 0.664, + 0.457 + ], + "angle": 0, + "content": "Table 6. Feedback and optimization components" + }, + { + "type": "table", + "bbox": [ + 0.207, + 0.47, + 0.791, + 0.569 + ], + "angle": 0, + "content": "
SymbolTypeDefinition & Description
R(St, at, St+1)FunctionReward function, outputs reward value rt
I(·)FunctionIndicator function (returns 1 if condition holds, else 0)
∇θJ(θ)OperatorPolicy gradient for optimizing policy parameters Θ
γScalarDiscount factor for cumulative reward calculation
" + }, + { + "type": "table_caption", + "bbox": [ + 0.371, + 0.577, + 0.626, + 0.593 + ], + "angle": 0, + "content": "Table 7. Submodule-specific symbols" + }, + { + "type": "table", + "bbox": [ + 0.167, + 0.607, + 0.833, + 0.71 + ], + "angle": 0, + "content": "
SymbolTypeDefinition & Description
ΨFunctionReasoning function, generates intermediate reasoning results
ΓFunctionDecision function, produces final outputs (e.g., answers)
ψ(·)FunctionBranch selector for reflective reasoning path selection
φ(·)FunctionConfidence mapping function (evaluations to scalar confidence)
τScalarDecision threshold for triggering specific operations (e.g., verification/termination)
" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15909/41ce0363-307a-4461-bbaf-6fdf5036b2e7_origin.pdf b/data/2025/2504_15xxx/2504.15909/41ce0363-307a-4461-bbaf-6fdf5036b2e7_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..53d5a3833958ac8ec96dcfab89c6425d58127e15 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/41ce0363-307a-4461-bbaf-6fdf5036b2e7_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3dbeec63e77fed7ba33b5927bec52a3ccd90c967333c2faac71aea0315a53d8 +size 13759226 diff --git a/data/2025/2504_15xxx/2504.15909/full.md b/data/2025/2504_15xxx/2504.15909/full.md new file mode 100644 index 0000000000000000000000000000000000000000..90f3a4d1fc06b1e29e275965581e541e4fd4e94f --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/full.md @@ -0,0 +1,1082 @@ +# Synergizing RAG and Reasoning: A Systematic Review + +Yunfan Gao + +Shanghai Research Institute for + +Intelligent Autonomous Systems, + +Tongji University + +China + +gaoyunfan1602@gmail.com + +Yuxi Bi + +College of Design and Innovation, + +Tongji University + +China + +yuxibi@gmail.com + +Yun Xiong + +Shanghai Key Laboratory of Data + +Science, School of Computer Science, + +Fudan University + +China + +yunx@fudan.edu.cn + +Ming Xue + +Percena AI + +China + +mxue@percena.co + +Yijie Zhong + +College of Design and Innovation, + +Tongji University + +China + +dun.haski@gmail.com + +Haofen Wang* + +College of Design and Innovation, + +Tongji University + +China + +carter.whfcarter@gmail.com + +# Abstract + +Recent breakthroughs in large language models (LLMs), particularly in reasoning capabilities, have propelled Retrieval-Augmented Generation (RAG) to unprecedented levels. By synergizing retrieval mechanisms with advanced reasoning, LLMs can now tackle increasingly complex problems. 
This paper presents a systematic review of the collaborative interplay between RAG and reasoning, clearly defining "reasoning" within the RAG context. It construct a comprehensive taxonomy encompassing multi-dimensional collaborative objectives, representative paradigms, and technical implementations, and analyze the bidirectional synergy methods. Additionally, we critically evaluate current limitations in RAG assessment, including the absence of intermediate supervision for multi-step reasoning and practical challenges related to cost-risk trade-offs. To bridge theory and practice, we provide practical guidelines tailored to diverse real-world applications. Finally, we identify promising research directions, such as graph-based knowledge integration, hybrid model collaboration, and RL-driven optimization. Overall, this work presents a theoretical framework and practical foundation to advance RAG systems in academia and industry, fostering the next generation of RAG solutions. + +# 1 Introduction + +Recent breakthroughs in Large Language Models (LLMs) like OpenAI O1 [39] and DeepSeek-R1 [25] have shifted the paradigm from "pre-training scaling" to "test-time scaling" [63]. Unlike traditional language models that improve via corpus accumulation during pre-training, these models enhance performance in complex tasks—such as mathematical derivation and code generation [29]—through post-training innovations during the inference phase (e.g., Long-CoT thinking [8]). This shift has led to the emergence of "Large Reasoning Models" (LRMs) [99] with advanced internal reasoning abilities. + +These advancements have not only boosted basic model capabilities but also opened new avenues for application technologies like Retrieval-Augmented Generation (RAG) [21]. 
Serving as a key link between language models and external knowledge, RAG overcomes traditional LLMs' limits in knowledge freshness, domain specificity, and factual accuracy by retrieving real-time non-parametric information and integrating it into the context. This enhances information processing and reduces hallucination risks in knowledge-intensive tasks. + +Technological evolution is advancing RAG architectures through innovations like query rewriting [61], re-ranking [1], and hybrid retrieval [88], creating an Advanced RAG paradigm focused on pre-retrieval optimization and post-retrieval refinement. Modular RAG [22] further breaks down these systems into component-based, service-oriented architectures, using orchestration to tackle practical challenges. + +Despite improvements in query intent recognition and knowledge use, challenges of RAG remain in demanding tasks like deep research and complex decision-making. Key issues include: 1) difficulty capturing intent from ambiguous queries; 2) poor logical coherence in multi-hop reasoning; 3) efficiency limits of traditional retrieval in open domains; and 4) degraded generation quality from noisy retrieved data. + +Models like DeepSeek-R1, with strong reasoning capabilities, inspire new directions for RAG systems. As shown in Figure 1, recent research explores integrating formal reasoning frameworks with knowledge retrieval. This approach optimizes retrieval through logic-driven query reformulation and uses reasoning to analyze and validate retrieved knowledge, creating cognitive synergy between retrieval and generation. This paradigm aims to overcome conventional limitations, enabling intelligent systems with rigorous logic and reliable knowledge use. From a trend perspective, an increasing number of methods combine reasoning and retrieval abilities through reinforcement learning (RL), marking a new direction in the LRM era. 
Meanwhile, prompt-based approaches continue to rapidly evolve, with researchers aiming + +![](images/9e84b6f53ff577c819991b081153340f022d99ed4e4207a9a3ce616a9fa9e815.jpg) +Figure 1. Timeline of studies on RAG-reasoning synergy. From a technical perspective, the approaches can be categorized into Prompt-Based, Tuning-Based, and RL-Based methods. A notable trend is the increasing use of Reinforcement Learning to enhance RAG systems, particularly following the prosperity of test-time scaling. Meanwhile, Prompt-Based and Tuning-Based methods continue to evolve in parallel, demonstrating that there are multiple pathways to integrating reasoning capabilities into RAG systems. + +to achieve results through workflow design while keeping model parameters frozen. Notably, sole reliance on tuning methods is steadily decreasing, suggesting limited improvements from additional fine-tuning at this stage. + +Traditional RAG is limited by its unidirectional flow (retrieval $\rightarrow$ generation). Integrating reasoning capabilities grants the system greater autonomy, unlocking new possibilities. As shown in Figure 2, this integration is poised to drive major breakthroughs, enabling practical use in complex real-world scenarios. + +1) From Ambiguous Semantic Matching to Logic-Driven Targeted Retrieval. Traditional RAG relies on semantic similarity for retrieval; however, it is sensitive to phrasing variations. Advanced reasoning allows deep logical analysis of queries (e.g., causal links, conditional constraints) to dynamically refine retrieval strategies [24]. For example, to answer "How to reduce postoperative infection risks in diabetes patients?", the system prioritizes retrieving "blood glucose control thresholds" and "antibiotic usage guidelines" over simply matching "diabetes postoperative care". This approach supports multi-hop retrieval by breaking down complex queries into sequential sub-queries while preserving cross-document coherence through reasoning chains. 
+ +2) From Simple Information Aggregation to Logically Coherent Context Construction. Current RAG systems input all retrieved document chunks into context directly, often causing fragmented or contradictory information that confuses LLMs. Reasoning-enhanced systems integrate evidence chains by logically verifying and inferring causality in retrieved content, filtering conflicts and forming coherent explanations [100]. They also use dynamic knowledge completion to detect missing logical links, prompting iterative retrieval or inference to fill gaps [51]. +3) From Simple and Single-Turn QA to Systemic Decision Support. Traditional RAG performs well in factual QA [65] but struggles with multi-step and complex decision-making. Reasoning-integrated systems produce structured reasoning output, enhancing multi-objective optimization to balance retrieval breadth and solution feasibility under various constraints. For example, multiple constraints under different conditions in engineering construction plans [54], and the formulation of diagnosis and treatment plans for various diseases in the medical field [105]. +4) From Indiscriminate Retrieval to Intelligent Resource Allocation. Traditional RAG retrieves documents for all queries, regardless of complexity. Reasoning-enhanced systems use on-demand retrieval, handling simple queries + +![](images/41e010600a7e8b306e4b7d692efd51a598046a1382a35dd7bb01fd4aa49ff2f2.jpg) +Figure 2. Advantages of Combining RAG with Reasoning + +with direct generation and complex ones with multi-round retrieval to reduce latency [20]. Dynamic retrieval pruning uses pre-reasoning predictions to target key information, minimizing unnecessary document and graph traversal [41]. + +5) From Passive Knowledge Tool to Proactive Cognitive Assistant. Advancing beyond reactive knowledge retrieval, reasoning-enhanced systems can proactively serve users by asking clarifying questions and anticipating implicit needs. 
This shift enables human-like assistants that integrate memory, reasoning, and decision-making, proving especially valuable for complex tasks such as deep research [43], business analytics [50], personal assistant [107] and urban planning [85]. + +However, the synergistic pathway between RAG and reasoning requires more than simply replacing conventional generative LLMs with LRM modules. It necessitates deep integration of technological evolution insights from LRM - achieved through reconstructing knowledge retrieval mechanisms and strengthening reasoning-generation collaborative linkages - to enable system-level enhancement of cognitive capabilities within the RAG architecture. + +Therefore, this paper aims to address the pivotal and forward-looking research question of "how RAG systems can synergize with reasoning capabilities". We systematically review current studies after 2024 while establishing explicit definitions for reasoning within RAG contexts. Building on this foundation, we provide an in-depth taxonomy and analysis of the objectives, typical patterns, and implementations underlying RAG-reasoning integration, clarifying key technological trajectories and critical breakthroughs. + +As RAG technology enters its next developmental phase, downstream task complexity has escalated significantly - + +particularly evident in emerging challenges like Deep Research [106]. These advanced applications not only demand enhanced reasoning capacities but also drive RAG's expansion into multimodal, cross-domain, and dynamic environments. However, while the integration of reasoning capabilities demonstrably improves complex task performance, existing research frequently overlooks associated computational overheads and potential risks. Through systematic examination of these operational constraints and analysis of industry applications, we propose practical guidelines for multiple real-world scenarios with diverse requirements. 
+ +Finally, we outline future research directions grounded in current technological evolution, including: 1) RAG-graph architecture integration, 2) coordinated multimodal reasoning frameworks, 3) hybrid model collaboration, and 4) RL optimization specifically designed for RAG systems. This work establishes both theoretical foundations and practical roadmaps for subsequent research in this evolving field. + +The contributions of this paper can be summarized as follows: + +- Pioneering Review. This work represents the first comprehensive survey focusing on the integration of RAG with reasoning, offering novel insights and forward-looking guidance for advancing this emerging research frontier. +- Systematic Taxonomy. We present a multi-dimensional framework to systematically examine the objectives, paradigms, and methodologies for combining RAG with reasoning capabilities, establishing clear classification criteria across technical dimensions. +- Practical Guidance. Beyond theoretical exploration, we critically discuss the additional cost and potential + +risks associated with the introduction of reasoning, accompanied by an actionable Practical Guide for real-world scenarios. + +- Open Resource Platform1 Through the OpenRAG platform, we provide a rich, multi-dimensional review of related work, which allows readers to quickly search and compare different methods. + +# 2 Overview + +This chapter establishes a conceptual framework for the paper along two key dimensions. First, it formally defines "reasoning" and distinguishes it from "inference." Second, it organizes a taxonomy of synergy mechanisms between "RAG and Reasoning." To construct a clear cognitive pathway, we address three progressive research questions: + +- Why synergize RAG and reasoning? +- What are their typical collaboration paradigms? +- How can this integration be realized? 
+ +# 2.1 Definition + +The definition of reasoning in modern AI systems remains an evolving construct, particularly within the context of LRMs exemplified by DeepSeek R1 and OpenAI O1. Here, under the scope of LLMs, we formalize reasoning as a structured, multi-step process that dynamically decomposes complex problems, generates intermediate hypotheses, and iteratively refines solutions through logical and evidence-based transformations. Mathematically, let a reasoning process $\mathcal{R}$ be defined as a tuple $\langle \mathcal{K}_p, \mathcal{K}_r, S_t, \Phi \rangle$ , where $\mathcal{K}_p$ denotes parametric knowledge embeddings, $\mathcal{K}_r$ represents retrieved contextual knowledge, $S_t = \{s_0, s_1, \ldots, s_n\}$ constitutes the evolving state sequence with $s_0$ as the initial query and $s_n$ as the final response, and $\Phi : S_i \times \mathcal{K}_p \times \mathcal{K}_r \to S_{i+1}$ defines the state transition function. + +The reasoning process exhibits three defining characteristics. First, it is inherently multi-step, systematically decomposing complex problems into intermediate cognitive states (e.g., sub-question generation or temporary conclusions) rather than pursuing direct input-output mapping. Second, it generates novel knowledge or facts – synthesizing implicit relationships, deriving latent constraints, or reformulating problems in ways not explicitly present in the initial input or parametric memory (e.g., transforming "Is A greater than B?" into comparative subquestions about A and B's attributes). Crucially, these representations are not merely retrieved but dynamically constructed through the reasoning trajectory. 
Third, the process is teleological – its architecture and termination conditions are explicitly optimized for complex problem resolution, where complexity is measured by the necessity of state transitions or the insufficiency of direct retrieval from either parametric $(\mathcal{K}_p)$ + +or external $(\mathcal{K}_r)$ knowledge sources. This stands in stark contrast to atomic inference, which lacks such deliberate state construction and goal-aware iteration. + +The distinction between reasoning and inference manifests most saliently in their computational signatures. While inference $\mathcal{I}$ constitutes a single-step conditional probability computation $P(y|x) = \prod_{t=1}^{T} P(y_t|x, y_{, ) to steer model behavior, tuning-based methods that inject domain-specific knowledge or distill reasoning capability, and RL-based frameworks that optimize retrieval-reasoning policies through outcome reward models (ORM) or process reward models (PRM). The alignment between these + +methodologies and the proposed taxonomy is critical—static workflows predominantly rely on predictable prompt-guided reasoning chains, whereas dynamic systems increasingly integrate search-based exploration or solver-augmented strategies to navigate evolving state spaces. + +Overall, this tripartite taxonomy—motivational drivers, architectural paradigms, and implementation methodologies—establishes a unified lens for analyzing RAG+Reasoning systems. Subsequent chapters will elaborate on each stratum, progressively revealing how these conceptual distinctions translate into technical innovations that push the boundaries of machine intelligence. + +# 3 The purpose of the synergy + +The integration of RAG and reasoning marks a crucial advancement in enhancing LLMs' problem-solving abilities. Their true potential lies not in isolated use but in their synergy, which overcomes key limitations in retrieval and reasoning. 
This section explains the main motivations for combining RAG with reasoning, emphasizing two primary benefits: (1) enhancing retrieval accuracy and flexibility through reasoning, and (2) reinforcing complex reasoning by using context-rich retrieved knowledge. Figure 4 illustrates these collaborative aims and the limitations they address. + +The first key benefit is Reasoning-Augmented Retrieval where reasoning improves the retrieval process. Traditional RAG systems struggle with query formulation, relevance assessment, and iterative refinement—tasks needing logical and contextual analysis. Reasoning enables adaptive retrieval through dynamic query expansion, ambiguity resolution, and multi-hop evidence aggregation, overcoming the limits of keyword- or embedding-based methods and aligning retrieval with the task's reasoning demands. + +The second benefit is Retrieval-Augmented Reasoning, where external knowledge supplements the limitations of purely parametric LLM reasoning. Even advanced models face hallucination, knowledge gaps, and compositional challenges alone. Retrieval grounds reasoning in up-to-date, domain-specific, or rare information absent from model weights, crucial for explainability, multi-step deduction, and integrating diverse sources. + +Together, combining RAG and reasoning fills fundamental gaps in both techniques. By enhancing retrieval via reasoning and strengthening reasoning through retrieval, it broadens LLMs' capacity to address complex real-world problems. + +# 3.1 Reasoning-Augmented Retrieval + +Reasoning-Augmented Retrieval (RAR) represents a significant advancement in information retrieval by integrating multi-step reasoning to dynamically enhance retrieval quality. Unlike traditional methods that depend on static semantic + +matching, RAR creates a cognitive feedback loop mimicking human iterative reasoning, surpassing the limitations of simple "query-document" interactions. + +RAR's effectiveness stems from several key features. 
It often uses on-demand retrieval, where reasoning-evaluating intent clarity, knowledge state, and temporal factors-guides adaptive search initiation, reducing redundancies present in fixed triggers (e.g., UAR's classifier [14]). It improves semantic alignment by inferring implicit query logic such as business rules or entity relationships to generate precise retrieval requests aligned with data schemas (e.g., PlanRAG's plan-retrieval loops [48]). RAR also applies multi-step iterative refinement, using intermediate reasoning outputs (e.g., chain-of-thought, partial answers [78]) to recursively reformulate queries in a closed-loop system essential for resolving multi-hop dependencies [68]. Furthermore, it adapts to specific domains by tailoring retrieval to vertical contexts (e.g., financial or medical) and balances efficiency and precision through lightweight reasoning strategies (e.g., AdaptiveRAG's complexity-based selection [41]). + +Traditional retrieval systems, effective for simple queries, struggle with complex information needs due to rigid designs favoring static matching over dynamic reasoning, limiting their adaptability to changing contexts and diverse data. RAR primarily addresses five core challenges inherent in these conventional methods. + +3.1.1 Semantic Disparities Between Queries and Documents. A key challenge lies in the mismatch between user queries and documents—whether due to differing expression styles (professional jargon vs. casual language) or implicit contextual gaps—making direct semantic matching unreliable. Importantly, high similarity does not guarantee true relevance, as documents may share keywords or surface features without addressing the underlying intent or logic of the query. Retrieval models must therefore understand deeper semantics beyond superficial similarity. Domain adaptation further complicates this issue. 
To overcome these gaps, approaches such as reasoning-augmented embeddings (O1-Embedder [101] enriches queries with inferred "thinking" text), feedback-driven rewriting (SmartRAG [20] dynamically refines queries based on retrieved results), and preplanning (PlanRAG [48] extracts business rules to generate SQL queries aligned with database schemas) help better capture domain-specific semantics and ensure relevance beyond mere similarity. + +3.1.2 Inflexible Intent Disambiguation. Traditional RAG methods rely on fixed embedding similarity strategies, which fail to dynamically interpret the implicit intent behind complex queries (e.g., multi-hop reasoning or domain-specific requirements). User queries often exhibit semantic complexity that far exceeds their surface text—for instance, a request to "optimize supply chain costs" may require correlating disparate database fields not explicitly + +![](images/09571464f1bd88bbbe376dc373e70dd2c58a83763d1dbb94f0dcc2d042a01304.jpg) +Figure 4. The purpose of the synergy between RAG and reasoning + +# Core Limitations in RAG + +# Semantic Disparities + +Lexical and contextual disparitie + +(e.g., terminology mismatch, implicit + +context absence) + +Failure of semantic similarity matching + +# Knowledge Gaps + +Long-range reasoning tasks(e.g., multi- + +hop QA) + +Requiring logical integration across + +multiple knowledge segments + +Absence of intermediate knowledge + +leads to reasoning chain fragmentation + +# Core Limitations + +# in Reasoning + +![](images/134e9289b64c48e173041598397d78a2ffff93d776484a12645311f697b63f1f.jpg) + +# Inflexible Intent Disambiguation + +Failure to resolve implicit intents in + +complex queries + +(e.g., multi-hop reasoning, domain- + +specific requirements) + +The semantic complexity of user queries + +may far exceed their surface text + +# Heterogeneous Data Collaboration + +Schema-disparate data sources + +(e.g., structured records vs. 
unstructured passages) + +Requires cross-modal retrieval and alignment + +# Efficiency vs. Precision + +Comprehensive Retrieval $\rightarrow$ Overhead + +Restricted Retrieval $\rightarrow$ Critical info loss + +Iterations $\uparrow \rightarrow$ Computational costs + +Lack of dynamic trade-off mechanism + +# Reasoning Augmented Retrieval + +# RAG + +# Reasoning + +# Retrieval + +# Augmented Reasoning + +# Search Space Explosion & Local Optima Traps + +Search space grows exponentially with reasoning steps + +Traditional multi-step reasoning methods lack external knowledge constraints + +Lead to invalid hypotheses, local optima + +traps, or logical inconsistencies + +# Domain Knowledge Boudary + +Pre-trained models exhibit constrained knowledge coverage + +Struggle with tasks requiring domain-specific expertise + +(e.g., semiconductor design) + +Processing tasks requiring real-time information is challenging + +# Dynamic Knowledge Requirements + +Progressively evolving knowledge requirements + +Initial retrieval results are irrelevant or redundant to subsequent reasoning steps + +Dynamically evolving information needs in complex reasoning tasks + +Fixed retrieval strategies struggle to achieve real-time matching + +# Insufficient Depth & Breadth + +The inherent static knowledge of LLMs + +Challenge of covering dynamically evolving domain knowledge boundaries + +The reasoning chains frequently terminate at superficial associations + +The inability to establish cross-domain, multi-level knowledge connections + +mentioned. Static retrieval methods lack the adaptability to capture such dynamically evolving information needs. A critical limitation lies in intent dynamicity: as contextual understanding expands, traditional systems generate fixed retrieval results based solely on the initial query. 
Furthermore, semantic representation limitations of dense retrieval models (e.g., BERT-based models) hinder their ability to encode intricate semantic relationships (e.g., irony, metaphors), leading to misaligned results. Current approaches attempt to mitigate these issues through multi-step intent decomposition (e.g., LevelRAG's high-level searcher breaks complex queries into multi-hop sub-queries [103]) and dynamic query reformulation (e.g., LeReT's reinforcement learning generates diversified query candidates [34]), iteratively refining retrieval strategies to align with document content. + +3.1.3 Inefficient Coordination of Multi-Source Heterogeneous Data. Retrieval from diverse sources—text, tables, graphs, web, and APIs—often produces fragmented results due to a lack of global reasoning. The key challenge is modal heterogeneity: different retrieval techniques (dense + +retrieval for text, SQL for tables, GQL for graphs) operate independently without unified coordination. For example, experiments show standard RAG methods (like dense retrieval with query decomposition) yield only $32.7\%$ perfect recall and $40.9\%$ F1 on the OTT-QA dataset. These outcomes reveal the limitations of traditional approaches in aligning textual queries with structured tables—such as failing to link concepts like "K-12 student free rates" in text to related "education expenditure" columns when not explicitly mentioned. Additionally, disconnected entity matching (e.g., relating "company revenue" in text to financial tables) worsens inefficiencies, as conventional methods depend on semantic similarity and overlook domain-specific relationships and exact-value matches. Advanced techniques—such as reasoning-driven alignment (ARM's N-gram constraints for cross-modal entity decoding [7]) and unified semantic spaces (LevelRAG's shared multi-modal representations [103])—enable more effective, integrated retrieval. + +3.1.4 Incompleteness and Incoherence in Complex Retrieval Tasks. 
Single-step retrieval systems fall short in + +complex multi-hop reasoning tasks, such as deducing entity chains or conducting decision analysis. Traditional static retrieval conflicts with multi-step cognitive needs, resulting in three main issues: 1) Path dependency, where later retrievals rely on information from earlier steps (e.g., finding "the most populous county in California" before its education policies), but conventional systems lack state management; 2) Error propagation, early retrieval errors cause mistakes in intermediate results, which then affect the next round of retrieval; 3) Semantic inflexibility of fixed queries, which cannot adapt to dynamic concepts like entity aliases or relational predicates. + +Advanced methods address these flaws through integrated strategies. PlanRAG uses iterative "plan-retrospect-replan" cycles to trigger sub-queries when gaps arise. Reinforcement learning in LeReT improves query generation via reward-driven path selection. Likewise, ITER-RETGEN rebuilds follow-up queries using intermediate answers (e.g., "award recipient's height") to resolve multi-hop dependencies. + +3.1.5 Trade-offs Between Retrieval Efficiency and Precision. Complex scenarios face a tension between exhaustive retrieval, which is computationally costly, and restricted retrieval, which risks information loss. Expanding retrieval blindly inflates costs (e.g., LLM API calls) without ensuring relevance. Simple queries suffer from unnecessary multi-step retrieval, wasting resources, while complex queries face quality risks if retrieval is too limited. Adaptive approaches like complexity-aware routing (Adaptive-RAG's lightweight classifier allocates retrieval budgets [41]) and cost-sensitive training (SmartRAG's reinforcement learning balances quality and steps [20]) dynamically manage this trade-off. 
+ +In summary, Reasoning-Augmented Retrieval overcomes traditional RAG's limitations in dynamic triggering, semantic alignment, multi-hop support, domain adaptation, and efficiency trade-offs by deeply integrating reasoning into the retrieval process. Its key innovation is a bidirectional enhancement between reasoning and retrieval—reasoning refines retrieval strategies, while retrieval supports iterative reasoning—jointly boosting accuracy and efficiency in complex information tasks. + +# 3.2 Retrieval-Augmented Reasoning + +Retrieval-Augmented Reasoning (ReAR) combines external knowledge retrieval with inherent model reasoning to overcome failures from knowledge gaps or logical discontinuities in complex tasks. Unlike traditional RAG methods that retrieve information once, ReAR uses an iterative, context-sensitive retrieval that continuously provides relevant data to support multi-step reasoning. This approach is crucial for tasks needing strict logic, such as mathematical proofs, where intermediate steps require specific theorems or lemmas. By making retrieval an adaptive, ongoing process rather than a one-time step, ReAR strengthens each reasoning stage + +with accurate, current information, improving the overall inference's reliability and robustness. + +ReAR's core feature is dynamic knowledge supplementation, generating retrieval queries in real-time based on the evolving reasoning context. This overcomes the limits of single-round retrieval by enabling knowledge refinement at each step, as seen in process supervision frameworks like RAG-Gym [96]. ReAR also improves reasoning paths using methods like search space compression—for example, MCTS-guided heuristics in KBQA—and structured feedback from diverse sources like knowledge graphs [97]. These techniques maintain logical consistency while reducing irrelevant or conflicting information. 
Importantly, ReAR adapts well across domains, supporting precise knowledge retrieval and tool use for specialized tasks such as industrial problem-solving in PIKE [82] or scientific reasoning [106]. + +By integrating retrieval as an active part of the reasoning loop, ReAR addresses LLMs' temporal and depth constraints, ensuring adherence to domain-specific and time-sensitive requirements. This close coupling turns external knowledge into an on-demand resource, creating a closed-loop system that enhances the model's ability to handle complex, knowledge-intensive problems. Specifically, ReAR seeks to address the following limitations and challenges: + +3.2.1 Knowledge Gap in Multi-step Reasoning. In long-range reasoning, missing intermediate knowledge often breaks logical chains, especially in industrial and scientific contexts requiring multi-source data integration (e.g., text, tables, time-series). Static retrieval methods worsen this by not adapting to the reasoning process's changing needs. ReAR techniques address this with chained retrieval, as in CoRAG [83], which breaks multi-hop questions into sequential sub-queries (e.g., retrieving "event causes" then their "impacts"), systematically linking knowledge. Reasoning-state-aware retrieval, used in FLARE [45], predicts future information needs by generating interim prompts (e.g., "the next step requires discussion of ..."), enabling dynamic query construction that preserves coherence. Together, these approaches resolve the conflict between discrete retrieval and continuous reasoning. + +3.2.2 Reasoning Discontinuity Caused by Domain Knowledge Boundaries. Reasoning discontinuity arises from LLMs' limited knowledge, struggling with specialized domains (e.g., semiconductor design in PIKE [82]) and real-time data (e.g., medical parameters in Agentic Reasoning [92]). 
End-to-end models often produce factual errors, while traditional RAG methods fail to retrieve deep professional knowledge due to coarse retrieval, especially with complex data like tables, charts and images. + +ReAR addresses this with two complementary solutions: knowledge atomization and structural organization, as in PIKE's decomposition of documents into fine-grained units and multi-layer knowledge graphs for semantic and logical + +retrieval; and dynamic tool integration, as in Agentic Reasoning's real-time data acquisition via code execution and API calls to compute critical indicators (e.g., medical FiO2). These innovations overcome the challenges of specialized knowledge depth and timely information relevance that limit conventional methods. + +# 3.2.3 Search Space Explosion and Local Optima Traps. + +The main challenge in multi-step reasoning is the exponential growth of the search space, where methods like Chain-of-Thought (CoT) often yield suboptimal or inconsistent results due to unconstrained hypotheses. Traditional approaches like CoT and Tree-of-Thought (ToT) lack external knowledge constraints, causing invalid assumptions, while purely symbolic reasoning falls short in open-domain tasks. To address this, two strategies are used: knowledge base-anchored heuristic search (KBQA-O1 [58]), which limits reasoning actions to subgraphs in knowledge graphs, and a retrieval-verification mechanism (Search-o1 [51]) that prunes unsupported reasoning paths using evidence from the knowledge base. Together, these reduce the search space and preserve reasoning coherence. + +3.2.4 Dynamic Knowledge Requirements in Multi-Step Reasoning. Complex multi-step reasoning tasks face the challenge of continuously changing knowledge requirements. This is evident in cases like multi-hop reasoning and engineering planning, where each stage generates new sub-problems (e.g., moving from "architectural design" to "material cost estimation"). 
Static knowledge bases or one-time retrieval methods cannot meet this evolving demand. This manifests in two ways: initial knowledge may miss later needs, causing gaps; and fixed knowledge sets may include irrelevant information, reducing reasoning accuracy. To address this, new retrieval-augmented reasoning approaches introduce dynamic solutions: process supervision (e.g., reward models in RAG-Gym [96]) detects knowledge gaps in real time, atomic decision-making (e.g., step decomposition in DeepRAG [24]) triggers retrieval as needed, and tree-like expansions (e.g., multi-path retrieval in DeepSolution [54]) enable parallel exploration. By integrating knowledge retrieval within reasoning, these methods let the system identify, supplement, and verify knowledge dynamically—much like a human expert—greatly enhancing the reliability and completeness of complex reasoning. + +# 3.2.5 Insufficient Depth and Breadth of Reasoning. + +This issue is prominent in expert tasks like medical diagnosis, legal analysis, and research report generation. LLMs' static knowledge often fails to capture the evolving scope of domain knowledge, resulting in shallow reasoning that misses multi-level, cross-domain connections. For example, when assessing "Company A is affected by economic recession," traditional methods rely on superficial statistical + +patterns and cannot systematically follow the deeper logical chain from "Company A $\rightarrow$ industry supply chain $\rightarrow$ macroeconomic policy $\rightarrow$ international political landscape," leading to reasoning that lacks causal depth. + +To overcome this, recent advances use structured, retrieval-enhanced frameworks. ToG2.0 [60] models Knowledge Graph relational paths as retrieval guidance vectors, enabling targeted queries along entity paths, surpassing the limits of keyword-based retrieval. 
This approach complements CR-Planner's [52] iterative expansion, which triggers retrieval of specialized knowledge (e.g., textbook proofs of algorithm complexity) at critical reasoning points, ensuring accurate domain knowledge integration via multi-round validation. Addressing cross-domain knowledge linkage, CO-STORM [43] employs a multi-agent system whose host module generates cross-modal retrieval commands by analyzing potential semantics in uncited documents. + +# 4 Patterns of synergy + +Section 3 detailed the need and motivation for integrating RAG with reasoning. Building on this, this section presents two core implementation patterns for RAG-reasoning synergy (Figure 5): (1) the Pre-defined Workflow, which uses logical architectures with preset rules for coordination, and (2) Dynamic Workflow, which relies on context-aware, adaptive coordination via real-time decision engines. These patterns illustrate current frameworks combining knowledge retrieval and multi-step reasoning from deterministic and flexible perspectives. + +# 4.1 Pre-defined workflow + +Pre-defined workflow is a multi-step reasoning approach with a fixed architecture and sequential execution, emphasizing process clarity and operational determinism. It consists of predefined iterative stages, each with strict input-output rules and no dynamic changes based on intermediate results. This modular design ensures controllability and structured reasoning for complex tasks. All steps are executed regardless of intermediate outcomes, guaranteeing repeatability and stability while avoiding uncertainties from dynamic decisions. Although it sacrifices adaptability, this approach offers procedural predictability and is well-suited for scenarios demanding clear reasoning paths, albeit with possible computational redundancy due to lack of real-time adjustments. + +Mathematically, the pre-defined RAG workflow can be formalized as a deterministic multi-step operational chain. 
Given an input query $Q$ and a predefined sequence of $N$ reasoning steps and the final decision output $D$ , the complete workflow is expressed as: + +$$ +D = f _ {N} \circ \dots \circ f _ {2} \circ f _ {1} (Q) \tag {1} +$$ + +![](images/044160498b6a6dec3d4b731753eb83312cd53fabf064ad53ce6793173d12947b.jpg) +Figure 5. Patterns of Synergy between RAG and Reasoning + +where each $f_{i}\in \{\Psi ,R,\Gamma \}$ denotes strictly defined functions for reasoning $(\Psi)$ , retrieval $(R)$ , or decision-making $(\Gamma)$ , with $\circ$ representing function composition. This formulation adheres to the fixed mapping sequence $Q\mapsto \Psi (Q)\mapsto R(\Psi (Q))\mapsto \Gamma (R(\Psi (Q)))$ , exhibiting Markovian properties where $f_{t + 1}$ depends solely on $f_{t}$ 's output while remaining independent of historical states $\{f_{< t}\}$ . The chained composition guarantees process closure and reproducibility, though constrained by the static combinatorial nature of $\{f_i\}_{i = 1}^N$ . + +In the pre-defined pipeline, based on the position where reasoning is introduced, it can be further divided into Pre-Retrieval, Post-Retrieval, and Hybrid. + +4.1.1 Pre-Retrieval Reasoning. For pre-retrieval methods, the sequence is explicitly defined as + +$$ +D = \Gamma \circ \mathcal {R} \circ \Psi (Q) \tag {2} +$$ + +where $\Psi$ denotes a reasoning operator that systematically transforms or enriches the query prior to retrieval. This paradigm enhances retrieval precision by resolving ambiguities, inferring implicit intents, or optimizing query representations. Current research identifies four principal methodological categories for designing $\Psi$ : + +Query Optimization focuses on generating and selecting query variants to maximize retrieval relevance. 
Mathematically, this is formalized as Candidates $=$ Generate(Q,C), $\Psi_{\mathrm{Optimize}}(Q,C) = \arg \max_{\mathrm{candidate} \in \mathrm{Candidates}}$ Score(candidate), where (Generate) produces candidate queries and (arg max) selects optimal variants based on contrastive training or reinforcement learning. Representative implementations, such as LeReT [34], leverage iterative sampling and optimization to balance query diversity and specificity. + +Attribute Judgment employs classification mechanisms to dynamically regulate retrieval triggers. This is modeled as $\Psi_{\mathrm{Classify}}(Q) = \mathrm{Classify}(Q)$ , where Classify evaluates query + +attributes (e.g., temporal sensitivity, intent complexity) against predefined criteria. Frameworks like UAR [14] and AdaptiveRAG [41] exemplify this approach by integrating multistage classifiers to minimize unnecessary retrievals. + +Plan Generation decomposes complex queries into structured sub-task sequences to guide retrieval direction. Formulated as $\Psi_{\mathrm{Plan}}(Q) = \mathrm{Plan}(Q)$ , the operator Plan generates hierarchical task decompositions, as seen in PlanRAG [48], which utilizes chain-of-thought reasoning to align retrieval targets with multi-step problem-solving requirements. + +Semantic Enhancement enriches query representations using domain-specific or task-aware embeddings. Expressed as $\Psi_{\text{Enhance}}(Q) = \text{Encode}(Q, \mathcal{K})$ , where $\mathcal{K}$ denotes auxiliary knowledge (e.g., reasoning trajectories), methods like O1-Embedder [101] integrate latent reasoning patterns into query embeddings to improve retrieval robustness. + +Collectively, these methodologies demonstrate that pre-retrieval reasoning serves as a systematic interface to mitigate semantic gaps between raw queries and knowledge bases, establishing a critical component for precision-driven RAG architectures. + +4.1.2 Post-Retrieval Reasoning. 
In pre-defined RAG systems with multi-step reasoning pipelines, the post-retrieval reasoning paradigm represents a critical advancement where cognitive processing occurs after information retrieval from external sources. This approach addresses inherent limitations in conventional RAG, particularly in managing knowledge conflicts, mitigating information insufficiency, and enhancing logical consistency across complex reasoning tasks. Mathematically, this process can be formalized as a deterministic function composition: + +$$ +D = \Gamma \circ \Psi \circ \mathcal {R} (Q) \tag {3} +$$ + +$\mathcal{R}$ denotes the retrieval operator, $\Psi$ implements the reasoning transformation, and $\Gamma$ represents the final decision function. + +The core characteristic of Post-Retrieval Reasoning lies in its execution of the reasoning process after retrieval, with the reasoning target being the retrieved content. ToG2.0 [60] proposes an iterative multi-step reasoning framework that alternates between graph retrieval and context retrieval, integrating the reasoning judgment of LLMs to progressively expand entities and prune irrelevant information, ultimately generating accurate answers. This approach dynamically addresses the issue of insufficient information through iterative refinement while establishing a dual-evidence verification mechanism via knowledge graph relation pruning and entity-guided context retrieval. Its graph-structured reasoning module transforms the connectivity validation of triple paths into a constraint satisfaction problem, effectively mitigating logical inconsistencies between text fragments and thereby significantly improving the quality of complex question answering. 
+ 

ActiveRAG [100], on the other hand, employs a predefined three-stage process (Self-Inquiry $\rightarrow$ Knowledge Assimilation $\rightarrow$ Thought Accommodation) to structurally comprehend and calibrate retrieved knowledge, resolving conflicts between parametric memory and external knowledge. During the Knowledge Assimilation stage, ActiveRAG enhances the corrective effect of external knowledge on the internal representations of LLMs through multi-instruction fine-tuning strategies (e.g., counterfactual comparison and anchor association), substantially reducing the likelihood of hallucination generation. ARM's [7] structural alignment and self-verification stages also demonstrate optimization for post-retrieval reasoning. By incorporating domain knowledge via mixed-integer programming (MIP) solvers, ARM ensures the rationality and coverage of retrieval results, providing a scalable optimization framework for multi-source data compatibility and thereby enabling globally optimal cross-modal retrieval. + 

4.1.3 Hybrid Reasoning. The Hybrid pattern of pre-defined process forms a composite processing paradigm by integrating pre-retrieval reasoning with post-retrieval reasoning. The essence is formalized as a multi-round recursive iterative process, where each iteration cycle strictly comprises three phases: Retrieval, Generation, and Reasoning, executed as structured composite operations. Let the total number of iterations be $T$ ; the workflow is defined as: + 

$$
Q _ {T} = \left(\bigcirc_ {t = 1} ^ {T} \mathcal {R} _ {t} \circ \Gamma_ {t} \circ \Psi_ {t}\right) \left(Q _ {0}\right) \tag {4}
$$

Here, each iterative unit is indexed by $t$ . The process terminates when a predefined condition $\mathcal{T}(Q_t, D_t, C_t)$ is met, yielding the final response $\Gamma_{\mathrm{final}}(C_T)$ . 
This recursive mechanism enables dynamic synergy between knowledge acquisition and semantic inference, overcoming the linear limitations of single-cycle retrieval-generation frameworks. + +IR-CoT [78] leverages chain-of-thought reasoning to iteratively construct intermediate logic chains, enabling multi-hop retrieval guided by progressively refined contextual cues. FinSearch [50] introduces a dual-phase architecture that first generates structured search graphs to model temporal and entity dependencies, followed by dynamic query rewriting to optimize financial data retrieval. LevelRAG employs hierarchical validation mechanisms, aggregating multi-granular retrieval results and triggering supplementary retrievals based on context completeness assessments. ITER-RETGEN [68] utilizes generation-enhanced feedback loops to iteratively refine query representations, enhancing semantic alignment between retrieval and generation phases. + +These approaches share a common foundation in structured recursion while diverging in operational mechanisms. By enforcing deterministic iteration cycles, they balance controlled workflow execution with adaptive semantic exploration, addressing challenges such as multi-step reasoning, temporal coherence, and cross-domain knowledge synthesis. The hybrid paradigm's strength lies in its capacity to decompose complex queries into iterative retrieval-generation units, systematically bridging knowledge gaps while maintaining interpretability and robustness in open-domain problem-solving scenarios. + +# 4.2 Dynamic RAG Workflow + +The RAG with dynamic workflow represents an autonomous reasoning architecture centered around LLMs, characterized by the integration of non-deterministic operational workflows and real-time decision-making capabilities. Unlike predefined pipelines, this architecture enables continuous monitoring of reasoning states to dynamically trigger retrieval, generation, or verification operations. 
The LLM actively evaluates contextual demands during reasoning processes, autonomously determining optimal moments for invoking external tools or resources through a hybrid feedback coordination mechanism. By eliminating fixed iterative units and pre-determined tool-calling sequences, the framework achieves dynamic evolution of execution pathways, demonstrating superior adaptability in complex cognitive tasks through real-time adjustment of computational workflows based on intermediate reasoning outcomes. + +This dynamic architecture manifests three principal characteristics: 1) Operator invocation is governed by the LLM's contextual state analysis, exemplified through special token prediction (e.g., '[Web-Search]' or `') to initiate external operations; 2) Reasoning trajectories exhibit high flexibility, allowing dynamic query reformulation and sub-problem generation to overcome limitations of static workflows; 3) Context-driven decision mechanisms prioritize real-time reasoning states over predefined rules, enhancing systemic responsiveness to emergent task complexities while improving precision. + +Defining the reasoning state at time $t$ as $S_{t} = (H_{t}, C_{t})$ , where $H_{t}$ denotes historical information aggregation and $C_{t}$ represents contextual embedding vectors, the decision process is modeled as a stochastic system: + +$$ +a _ {t + 1} \sim \pi \left(S _ {t}; \Theta\right) \tag {5} +$$ + +$$ +S _ {t + 1} = \delta \left(S _ {t}, \mathcal {T} _ {a _ {t + 1}} \left(S _ {t}\right)\right) \tag {6} +$$ + +Here, $\pi : S \to \Delta(\mathcal{A})$ constitutes the policy function mapping states to probability distributions over action space $\mathcal{A}$ (retrieval, generation, verification, etc.), while $\mathcal{T}_a$ denotes state transition functions corresponding to action $a$ . 
The non-Markovian nature of the system emerges from $S_{t+1}$ 's dependence on complete historical trajectories $\{S_{\leq t}\}$ , with dynamic adaptability ensured through extensible action spaces $\mathcal{A}$ and online optimization of policy parameters $\Theta$ . This formulation enables context-sensitive state updates via $\delta : S \times \mathcal{O} \to S$ , establishing a theoretical foundation for open-ended reasoning processes in complex problem domains. + +Based on the mode of reasoning initiation, agentic RAG with dynamic workflows can be further categorized into three distinct types: Proactivity-driven, Reflection-driven, and Feedback-driven mechanisms. The LLM proactivity-driven approach is characterized by the model's autonomous triggering of actions based on internal assessments, executing operations without external intervention through mechanisms analogous to human intuitive decision-making—for instance, when the model independently identifies insufficient evidentiary support in the current reasoning process, it proactively generates retrieval requests to supplement information. The reflection-driven mode emphasizes self-examination of the reasoning process, dynamically initiating subsequent operations through quantitative evaluation of intermediate result quality (e.g., triggering actions when the calculated reasoning support score of 0.7 exceeds a predefined threshold of 0.6), which simulates the self-optimization logic of expert systems, enabling the model to adjust reasoning pathways through introspection. The feedback-driven mechanism incorporates external intervention, employing independent models or rule-based systems to perform real-time scoring of intermediate states (e.g., an external reward model assigning a 2.5/5 score to reasoning steps) while providing corrective suggestions, operating similarly to a mentor-guided mode that continuously calibrates the reasoning workflow through external feedback signals. 
+ +4.2.1 Proactivity-Driven Reasoning. The core innovation of Proactivity-driven Reasoning lies in enabling LLMs to fully govern the reasoning process through self-triggered prediction mechanisms. This active control manifests through three key mechanisms: (1) direct tool invocation via model-generated special tokens (e.g., [Web-Search]), without external intervention, (2) context-aware decision making based + +on real-time knowledge gaps or hypothesis verification requirements, and (3) Markov Decision Process (MDP)-based dynamic path optimization. + +Formally, the reasoning process can be modeled as a state sequence $S = \{s_0, s_1, \ldots, s_t\}$ , where each state $s_t$ encapsulates the current reasoning context. At each step $t$ , the LLM selects an action $a_t \in \{\text{retrieve, generate, terminate}\}$ based on $s_t$ , executes the corresponding operation (e.g., document retrieval or answer generation), and updates its state through transition function $s_{t+1} = \delta(s_t, a_t, o_t)$ where $o_t$ represents action outcomes. This MDP framework enables dynamic path adjustment through real-time feedback until termination ( $a_T = \text{terminate}$ ) and final answer generation. + +Recent advancements demonstrate significant improvements over conventional RAG approaches. The Agentic Reasoning framework achieves granular control through dynamic tool invocation, eliminating predefined execution sequences. DeepRAG [24] optimizes cost-accuracy tradeoffs via MDP-based imitation learning, addressing the retrieval-generation disconnection in traditional systems. CoRAG [83] introduces hybrid-driven mechanisms combining LLM-initiated subqueries with external policy control, enhancing error tolerance for complex queries. Collectively, these approaches establish a paradigm shift from fixed pipelines to context-sensitive, self-optimizing reasoning architectures. + +4.2.2 Reflection-Driven Reasoning. 
The reflection-driven mechanism represents a dynamic reasoning framework that enables iterative self-evaluation and revision of intermediate outputs through model introspection. Common methods include: (1) an evaluation system combining explicit token prediction and implicit confidence scoring, (2) self-monitoring capabilities through grounding tokens for content-document consistency verification and utility tokens for answer effectiveness assessment, and (3) adaptive routing mechanisms that automatically select single-hop or multi-hop reasoning paths based on contextual complexity. The mathematical formalism of this process can be expressed as: + 

$$
\mathcal {P} = \bigcup_ {t = 1} ^ {T} \left[ G \left(\mathbf {c} _ {t}\right)\rightarrow E \left(\mathbf {h} _ {t}, \mathcal {D}\right)\rightarrow \psi \left(\phi \left(\mathbf {e} _ {t}\right), \tau\right)\right] \tag {7}
$$

where $G$ denotes the generation function operating on current context $\mathbf{c}_t$ , $E$ represents the evaluation function that assesses hidden states $\mathbf{h}_t$ against external knowledge base $\mathcal{D}$ , $\phi$ serves as the confidence mapping function, $\tau$ is the decision threshold, and $\psi$ functions as the branch selector. + 

In practical implementations like Self-RAG [3], this framework generates candidate responses alongside reflection tokens, computes passage relevance scores (ISREL $\in$ [0,1]) and factual support metrics (ISSUP), and employs weighted aggregation of token probabilities in $\phi$ to determine retrieval activation or generation revision through threshold-based $\delta$ operations. Meanwhile, Open-RAG [38] incorporates hybrid threshold mechanisms and Mixture-of-Experts architecture + 

to enforce counterfactual verification through non-retrieval confidence scoring $(\mathrm{Pr}_{\mathrm{NoRT}})$ , enabling dynamic expansion of complex reasoning capabilities while preserving base model efficiency. 
ReaRAG [49] utilizes knowledge-guided reasoning chains combined with external knowledge sources to perform reflection-driven reasoning. In each iteration, it adjusts the reasoning path through the "Thought-Action-Observation" paradigm, effectively preventing error propagation and improving answer accuracy. + 

The paradigm's innovation lies in reconstructing traditional sequential processes into conditional Markov decision processes, where state transition probabilities $P(s_{t + 1}|s_t)$ are dynamically determined by model self-evaluation outcomes. Compared to proactive LLM-driven methods (e.g., Toolformer's direct API invocation), the reflection-driven approach establishes closed-loop control through explicit evaluation stages (function $E$ ), effectively mitigating hallucination risks while maintaining computational efficiency. + 

4.2.3 Feedback-Driven Reasoning. The feedback-driven dynamic RAG system establishes closed-loop control over reasoning processes through external signals, formally modeled as a Partially Observable Markov Decision Process. The system state $s_t = (q_t, \mathcal{K}_t, \mathcal{H}_t)$ evolves through iterative interactions, comprising the current query representation $q_t$ , dynamic knowledge base $\mathcal{K}_t$ , and historical trajectory $\mathcal{H}_t$ . Initialized with $q_0$ and $\mathcal{K}_0 = \emptyset$ , the policy function $\pi(a_t | s_t)$ generates actions from the operational space $\mathcal{A} = \{\text{Retrieve}, \text{Reason}, \text{Verify}, \text{Answer}, \emptyset\}$ . State transitions follow $s_{t+1} = \delta(s_t, a_t)$ with knowledge base updates + 

$$
\mathcal {K} _ {t + 1} = \mathcal {K} _ {t} \oplus \operatorname {Retrieve} \left(q _ {t}\right) \cdot \mathbb {I} \left(a _ {t} = \text {Retrieve}\right) \tag {8}
$$

where $\oplus$ denotes incremental updates and $\mathbb{I}$ represents an indicator function. 
The reward function $R(s_{t},a_{t},s_{t + 1})\to r_{t}$ drives policy optimization through + 

$$
\pi_ {t + 1} = \Omega \left(\pi_ {t}, \nabla_ {\theta} \mathbb {E} _ {a \sim \pi_ {t}} \left[ R \left(s _ {t}, a, s _ {t + 1}\right) \right]\right) \tag {9}
$$

forming an adaptive control loop. Three distinct feedback mechanisms emerge within this framework. + 

Explicit reward feedback employs specialized models $\pi_{\mathrm{reward}}$ for quantitative evaluation, exemplified by RAG-Gym's process rewards [96]. The reward function combines immediate and terminal rewards: + 

$$
r _ {t} = \lambda_ {1} \pi_ {\text {reward}} \left(s _ {t}\right) + \lambda_ {2} \mathbb {E} _ {s _ {t + k}} \left[ \gamma^ {k} R _ {\text {terminal}} \right] \tag {10}
$$

with discount factor $\gamma$ . SmartRAG extends this through policy gradient optimization + 

$$
\nabla_ {\theta} J (\theta) = \mathbb {E} _ {\tau \sim \pi_ {\theta}} [ \sum_ {t = 0} ^ {T} \nabla_ {\theta} \log \pi_ {\theta} (a _ {t} | s _ {t}) \hat {A} _ {t} ] \tag {11}
$$

where the advantage function $\hat{A}_t$ integrates temporal feedback. + 

Implicit environmental feedback derives from knowledge base validation, as implemented in KBQA-o1's SPARQL verification and SolutionRAG's pruning mechanisms [58]. + 

This feedback is formalized as $r_t = \mathbb{I}(\mathcal{K}_t\models q_0)\cdot c_{\mathrm{valid}} - \mathbb{I}(\bot \in \mathcal{K}_t)\cdot c_{\mathrm{invalid}}$ with validation function $\mathbb{I}(\cdot)$ and penalty coefficients $c$ . ReARTeR [75] introduces threshold-triggered correction: when $r_t < \tau$ , it activates refinement loops $\mathcal{K}_{t + 1} = \mathrm{PEM}(\mathcal{K}_t,q_0)\oplus \mathrm{Retrieve}(\mathrm{PRM}(s_t))$ . + 

Structured rule feedback encodes domain knowledge through differentiable scoring functions. 
MCTS-KBQA [97] implements depth-attenuated rewards + 

$$
r _ {t} = \frac {1}{1 + \alpha d _ {t}} \sum_ {i = 1} ^ {n} \mathrm {LLM} _ {\text {score}} \left(a _ {t} ^ {(i)}\right) \tag {12}
$$

with search depth $d_t$ and decay coefficient $\alpha$ . CR-Planner's hierarchical critique combines subgoal and execution scores: $r_t^{\mathrm{total}} = \beta_1\pi_{\mathrm{sub}}(s_t) + \beta_2\pi_{\mathrm{exec}}(a_t|s_t)$ through weighted fusion. + 

These feedback mechanisms interact through a unified strategy update framework, where external feedback-driven approaches achieve controllable optimization of the reasoning process through interpretable feedback signals while maintaining the generative capabilities of LLMs. Overall, the dynamic process of RAG, by endowing the model with autonomy in the reasoning process, not only enhances adaptability to complex tasks but also provides a new solution for efficient reasoning in resource-constrained environments. + 

# 5 Implementation and Optimization + 

Building upon preceding sections, this section systematically analyzes the concrete implementation and optimization strategies for reasoning within the RAG paradigm. In contrast to existing surveys that predominantly focus on post-training methodologies or isolated LLM reasoning mechanisms, our analysis maintains a dedicated focus on the synergistic integration of RAG with reasoning, examining their co-adaptive implementations through a structural lens. + 

# 5.1 Reasoning Process + 

5.1.1 LLM CoT. Integrating Chain-of-Thought (CoT) reasoning with LLMs is key to combining RAG with complex reasoning tasks. Research shows CoT enhances RAG systems by explicitly guiding multi-step reasoning and dynamically incorporating external knowledge. 
For example, ActiveRAG [100] uses a "Self-Inquiry $\rightarrow$ Knowledge Assimilation $\rightarrow$ Thought Accommodation" chain to align knowledge and reasoning: a knowledge assimilation agent merges external documents with LLM memory via operations like association and reflection, creating structured knowledge. Meanwhile, a reasoning adaptation agent refines inference chains from Self-Inquiry to ensure answers align with retrieved knowledge and address reasoning gaps. Similarly, Adaptive-RAG [41] alternates between CoT and retrieval, breaking down multi-hop reasoning into steps such as entity localization and document correlation, refining retrieval and generation based on prior results. + 

![](images/a797186982b7420dcac71a470f6aca1de11923d2ffcfd02c0fb32375430a9b11.jpg) + 

![](images/cc011ed02bfe16c008fa59b27f259cd658bc4f9700c3e26493d504dd622d891f.jpg) 
Figure 6. Implementation and optimization of the synergy between RAG and Reasoning + 

At the knowledge and reasoning level, O1-Embedder [101] drives RAG through open-ended long-text reasoning, extending CoT beyond fixed triggers via coherent thought processes like problem decomposition. PlanRAG [48] explicitly uses CoT to produce executable multi-step plans, adjusting operations dynamically through a closed-loop "plan-execute-feedback" cycle. Despite different implementations, these methods share two CoT strengths: breaking down complex problems into clear intermediate steps and guiding external knowledge selection through reasoning states. Studies show these approaches outperform traditional RAG in multi-hop QA and knowledge-intensive tasks by enhancing both LLMs' reasoning and adaptability to external knowledge. + 

5.1.2 Special Token Prediction. Recent advances in active RAG also highlight special token prediction as a key method for dynamically linking external knowledge retrieval with multi-step reasoning [16]. 
By embedding domain- or action-specific tokens (e.g., '[Web-search]', '[Retrieve=Yes]', '`<search>`') into LLM vocabularies, models can autonomously trigger tools or self-reflect during text generation. Frameworks like Self-RAG [3] and SmartRAG [20] use dedicated tokens ('Retrieve', 'ISREL', '[RETRIEVE]') to manage retrieval activation, relevance checks, and output verification, turning static reasoning chains into conditional workflows. The innovation lies in predicting these tokens within generated sequences, segmenting tasks into retrieval initiation, document evaluation, and knowledge grounding phases.

Hybrid models such as Open-RAG [38] combine token control with mixture-of-experts (MoE) routing, sparsely activating experts aligned with token-predicted reasoning. Unlike traditional chain-of-thought or search tree methods, special token prediction offers finer control and interpretability by encoding decision logic explicitly in token sequences while maintaining end-to-end training. This approach also overcomes latency and inflexibility of preset retrieval schedules by enabling context-aware, on-demand tool use. For example, R1-Searcher [72] and Search-o1 [51] use token boundaries like `<search>` and `</search>` to coordinate retrieval pauses and resume generation after knowledge integration.

Together, these systems show that token-level prediction not only bridges reasoning and retrieval but also creates a scalable framework for tool-enhanced language agents, preserving generative fluency while enabling systematic external knowledge integration and procedural reasoning.

5.1.3 Search-Driven Reasoning. Recent advancements in search-driven reasoning have significantly improved RAG frameworks by employing structured search strategies for dynamic information exploration and multi-step reasoning with external knowledge. Current approaches mainly follow three paradigms: tree-based search, MCTS, and reinforcement learning-optimized policy networks.
+ +Tree-based methods organize reasoning hierarchically through structured path exploration. For example, StePO-Rec [5] uses a multi-step tree-structured reasoning method + +that iteratively retrieves different outfit matching knowledge and user preferences at each node, ultimately achieving generative recommendations for complementary items. OmniThink [94] uses an information tree to expand topic analysis by generating subqueries that guide breadth-first or depth-first retrievals. DeepRAG [24] applies a binary tree search within a Markov decision process to explore parametric knowledge and retrieval paths in parallel, selecting optimal branches. DeepSolution's [54] bidirectional thinking tree alternates expanding solution and critique nodes with scoring for path pruning, aligning naturally with MCTS evaluation. These methods balance exploration efficiency with solution coverage through explicit tree structures. + +MCTS enhances robustness by optimizing long-term decisions via simulation, evaluation, and backpropagation. CR-Planner [52] integrates MCTS with the UCB strategy to balance exploration and exploitation while estimating optimal subgoals through multi-step simulations. KBQA-O1 [58] and MCTS-KBQA [97] generate candidate actions using policy models and combine reward models to globally assess logical forms, reducing local optima. ReARTeR [75] innovatively merges MCTS with procedural reward models (PRMs), interleaving retrieval and reasoning steps, and filtering high-reward paths to form a closed-loop "reason-retrieve-reason" cycle. These methods probabilistically explore paths and use reinforcement learning feedback to improve global reasoning for complex tasks. + +Reinforcement learning-optimized policy networks adaptively refine search strategies. 
LeReT [34] replaces fixed search algorithms with reinforcement learning (e.g., IPO) to dynamically optimize query generation based on rewards like retrieval accuracy, implicitly learning optimal search patterns without explicit tree or graph structures, thus offering greater flexibility and scalability.

In summary, search-driven reasoning unites inference and retrieval through structured strategies, combining multi-path exploration, dynamic evaluation, and adaptive optimization to deliver interpretable, efficient solutions for knowledge-intensive tasks. Future work may focus on hybrid paradigms (e.g., integrating MCTS and reinforcement learning) and lightweight algorithms to balance performance with computational efficiency.

5.1.4 Reasoning on Graph. Graph-structured reasoning offers a novel approach for multi-hop inference in RAG systems by explicitly modeling knowledge interaction paths through topology. Current methods fall into two categories: query-flow-oriented search graphs (e.g., FinSearch [50]) and knowledge-association-based expansion graphs (e.g., ToG-2.0 [60]). FinSearch builds a directed acyclic graph (DAG) where nodes are atomic subqueries (e.g., stock prices, financial reports) and edges capture logical and temporal dependencies. A pre-planner breaks down queries into subquery sequences, using graph traversal to control information flow and dynamically adjust paths, such as backtracking when conflicts arise—substantially surpassing linear chain-of-thought methods in handling complex logic.

5.1.5 External Solver. The integration of RAG and reasoning can also be achieved by incorporating external solvers, where specialized solvers, such as the Alignment-Oriented LLM-based Retrieval Method (ARM), are employed to handle the reasoning component.
The retrieval process for complex problems is formulated as a global optimization task, leveraging external solvers like mixed-integer programming (MIP) to achieve structural alignment and joint optimization of data objects. Specifically, ARM first decomposes user queries into keywords that match N-grams in the dataset through an information alignment module, generating an initial set of retrieval candidates via constrained decoding. Subsequently, in the structural alignment phase, the MIP solver performs global filtering on candidate objects based on a predefined objective function that maximizes both the relevance of retrieved objects to the query and their mutual compatibility. This ensures that the selected objects not only cover the requirements of the query but also form a coherent information chain through entity or inter-table linkages. Finally, the self-verification mechanism of the LLM, combined with a beam search-based aggregation strategy, dynamically refines and consolidates multiple candidate sets, ultimately producing a retrieval collection that satisfies both semantic matching and the structural organization of the data. + +ToG-2.0 achieves multi-hop expansion by integrating knowledge graphs with documents, starting from an initial entity and iteratively extending relevant entities and relations (such as corporate ownership chains and technology dependency networks) via the Edge function. This process constructs structured triple paths while simultaneously retrieving and verifying document content. By tuning the width and depth parameters, the method emulates human reasoning: broadly exploring potential associations before deeply verifying high-confidence paths. 
FRAG [23] dynamically adjusts retrieval strategies by predicting the hop range of reasoning paths based solely on the query text, thereby enhancing retrieval quality without requiring additional fine-tuning or invocation of large language models, enabling flexible and efficient retrieval optimization. FG-RAG [32] further expands entity coverage in graph retrieval through context-aware entity expansion, providing richer background information. Combined with query-level fine-grained summary generation, FG-RAG transforms coarse-grained graph information into highly relevant detailed content, effectively improving the performance of query-focused summarization tasks. + +Although differing in design from workflow-based methods, ToG-2.0 shares key advantages with other graph-structured approaches: explicitly modeling reasoning state dependencies, supporting dynamic path generation and optimization, + +and enabling closed-loop interaction between retrieval and reasoning. This effectively overcomes the limitations of traditional RAG in implicit relation inference and counterfactual analysis, thereby establishing an interpretable theoretical and practical framework for knowledge reasoning. + +# 5.2 Reasoning Optimization + +In the previous chapter, we focused on introducing several approaches to integrate reasoning with RAG. This chapter shifts attention to how to augment the reasoning capabilities, specifically including Prompt-Based, Tuning-Based, and RL-Based strategies. + +5.2.1 Prompt-Based. Prompt-Based optimization is a key approach to improving RAG and reasoning system performance by using carefully designed natural language prompts. These prompts break down complex reasoning tasks into manageable steps and guide LLMs to follow specific logical structures during generation. 
The main advantage is that control over reasoning flow is achieved solely through prompt design, without parameter fine-tuning or reinforcement learning, preserving the model's generalization while enhancing task-specific results.

This approach has three main features. First, task structuring: prompts explicitly decompose and control reasoning chains via zero-shot or templated designs. Techniques like Co-STORM [43] and WriteHere [98] use role assignments, stage divisions, and operation-specific instructions to guide multi-step reasoning—such as proposal generation, knowledge retrieval, refinement, and validation—improving interpretability by representing intermediate steps clearly.

Second, result reliability is improved by standardizing outputs and reducing hallucinations. Strategies include requiring citation of retrieval results, enforcing specific output formats, and integrating reflection and calibration based on retrieved knowledge. Systems like FinSearch [50] and ActiveRAG [100] incorporate temporal weighting, deduplication, and domain rules through prompts, enhancing consistency and logical coherence, especially in complex domains.

Third, interactive adaptability allows dynamic prompt adjustments. Special tokens (e.g., [Web-search]) enable models to trigger tools or revise queries in real time based on intermediate results. Methods such as Agentic Reasoning [92] and PlanRAG [48] use context-sensitive prompts and feedback loops to refine reasoning paths dynamically, maintaining coherence and accuracy in multi-hop tasks and outperforming traditional RAG methods in complex, evolving scenarios.

In summary, prompt-based optimization offers an efficient, flexible, and reliable approach to enhancing RAG+Reasoning by emphasizing task structuring, result standardization, and interactive adaptability.
Its non-intrusive and broadly applicable design has established it as a mainstream strategy for optimizing LLM reasoning and serves as a foundation + +for future hybrid methods integrating fine-tuning and reinforcement learning. By systematically optimizing reasoning without altering model parameters through semantic structures, dynamic feedback, and symbolic constraints, this paradigm effectively manages macro-level controls like task decomposition and knowledge integration while addressing key challenges such as generation consistency, logical coherence, and external knowledge alignment. This makes prompt-based optimization a lightweight yet powerful solution for complex reasoning tasks. + +5.2.2 Tuning-Based. The tuning-based approach improves the integration of RAG and reasoning by optimizing model parameters to internalize the retrieval-augmented chain-of-thought mechanism within LLMs. Current research mainly targets three goals: retrieval pathway optimization, structured generation enhancement, and collaborative training with external modules. + +For retrieval pathway optimization, methods like CoRAG [83] and DeepRAG [24] build end-to-end multistep reasoning frameworks through full parameter fine-tuning and multitask learning. CoRAG expands single-step QA datasets into retrieval-reasoning chains and jointly trains tasks such as sub-query generation, intermediate answer prediction, and final composition. This boosts the model's ability to break down complex problems (e.g., multi-entity relational reasoning) and adapt retrieval strategies dynamically (e.g., query rewriting, error correction). DeepRAG combines imitation and contrastive learning with binary tree search to create efficient retrieval paths, using a DPO-style contrastive loss to reduce redundant retrieval while maintaining accuracy. + +To improve structured generation, MCTS-KBQA [97] and Self-RAG [3] fine-tune models for precise special token generation. 
MCTS-KBQA uses supervised fine-tuning to make large language models output instructions that comply with knowledge graph protocols (e.g., SPARQL), modeling reasoning as executable tool-call sequences. Self-RAG enhances self-supervised generation control by expanding vocabulary and training the model to generate reflection tokens like retrieval triggers and relevance markers, preserving fluency and reducing factual errors. Additionally, O1-Embedder [101] and Open-RAG [38] align semantic spaces via mixed fine-tuning: O1-Embedder combines generative and contrastive training with special tokens to separate generation from embedding tasks, enhancing multihop semantic understanding; Open-RAG uses QLoRA [17] quantized fine-tuning and Mixture of Experts (MoE) modules to specialize networks for single/multi-hop reasoning. + +In collaborative optimization with external modules, AdaptiveRAG [41] and CR-Planner [52] apply parameter isolation to balance generality and adaptability. AdaptiveRAG finetunes a lightweight classifier to select retrieval strategies dynamically. CR-Planner introduces a Critic model trained with contrastive loss on MCTS trajectory data to assess the + +Table 1. Comparison of RL-based RAG with Reasoning Methods + +
MethodBase ModelRLParameterSupervisionReward FunctionPolicy Strategy
PORAG [73]Qwen2.5/Llama3.2GRPOQLRAORMDual rewards: +1. Retrieval fidelity (Rfid) +2. Response quality (Rqual) +Combined: R = αRfid + βRqual• Group-based advantage normalization +• PPO-style clipped objective +• KL regularization
DeepResearcher [106]Qwen2.5-7BGRPOFullORMFormat compliance penalty (-1) + Answer F1 score• Reference policy constraints +• KL divergence penalty
ReSearch [6]Qwen2.5-7BGRPOFullORMHybrid rewards: +• Answer F1 (vs ground truth) +• Format compliance check• GRPO with clip ratio 0.2 +• Group advantage normalization (G=5) +• β = 0.001 KL penalty
ReZero [16]Llama3.2-3BGRPOFullORM+PRM• Answer correctness +• Format compliance +• Search diversity +• Chunk matching +• Retry behavior +• Strategy compliance• Intra-group reward comparison +• Noise-injected robustness training +• KL constraints
MMOA-RAG [12]Llama-3-8BMAPPOFullORMShared F1 reward + penalties: +• Excessive sub-questions +• Document ID errors +• Answer hesitability• MAPPO actor-critic updates +• Cosine learning rate scheduling
DeepNote [84]Qwen2.5/Llama3.1DPOFullORMImplicit preference modeling via likelihood contrast• Direct Preference Optimization +• Preference gap maximization
R1-Searcher [72]Qwen2.5/Llama3.1Reinforce++FullORMTwo-stage rewards: +1. Retrieval count + format +2. F1 score + format penalty• RAG-based rollout +• Retrieval-masked loss
KBQA-O1 [58]Llama3/Qwen2.5/Gemma2MCTSDoRAORM+PRMComposite reward: +• Stepwise policy model score +• Final reward model score• MCTS trajectory optimization +• Q-value backpropagation
DeepRetrieval [42]Qwen2.5-3BPPOFullORMTask metrics: +• Recall@k/NDCG +• Syntax validity• GAE advantage estimation +• Distributed HybridFlow
LeReT [34]Llama3-8B/Gemma-9BIPOFullPRMAverage Precision (AP) of retrieved documents• Identity Policy Optimization +• Context distillation
SmartRAG [20]Flan-T5-L/Llama2-7BPPOFull/LoRAORMAction-specific: +• EM+F1 for answers +• Cost penalty for retrievals• On-policy sampling +• PPO updates
ReARTeR [75]LLaMA3.1-8BMCTSLoRAORM+PRMMonte Carlo step scoring + TD look-ahead• Iterative preference optimization +• KTO loss
DeepRAG [24]Qwen2.5-7B/Llama3.1-8BHybridFullORM+PRMCost-aware accuracy: +R = -C(o) × T(st) +C(o): Answer correctness +T(st): Total retrieval cost• Imitation + contrastive learning +• PPO-like calibration
RAG-Gym [96]LLaMA3.1-8BHybridLoRAPRMTriple criteria: +• Sufficiency +• Utility +• Redundancy• SFT + DPO +• PRM-guided selection
CR-Planner [52]Skywork-Llama3.1-8BMCTSLoRAPRMCritic-estimated rewards: +• Stepwise correctness +• Global impact• MCTS simulation +• Pairwise ranking loss
+ +1ORM: Outcome-based Reward Model; PRM: Process-based Reward Model. 2Full: Full parameter tuning. + +long-term value of reasoning actions, prioritizing efficient solutions in tasks like mathematical reasoning. + +Together, these tuning strategies restructure the parameter space to internalize retrieval-reasoning interactions effectively, enhancing the model's ability to solve complex problems while ensuring computational efficiency and broad applicability across domains. + +5.2.3 RL-Based. As shown in Table 1, Reinforcement learning (RL) has recently become pivotal for tackling long-chain + +reasoning in modern inference models and optimizing RAG combined with reasoning tasks. Central to these advances is the use of dynamic reward mechanisms that guide LLMs to balance knowledge retrieval and logical reasoning adaptively. RL optimization objectives generally fall into two categories: outcome-based reward modeling (ORM) and process-based reward modeling (PRM), with some hybrid approaches blending both to balance global goals and local optimizations. + +The ORM paradigm focuses solely on the quality of the final output and its adherence to standards. For example, R1-Searcher [72] employs a two-stage Reinforce++ [35] training where rewards in the first stage depend on correct retrieval calls and special token generation, while the second stage directly optimizes the F1 score of answers. This encourages the model to develop strategies maximizing knowledge integration, reducing hallucinations, and enhancing accuracy in multi-hop QA beyond traditional RAG methods. Similarly, KBQA-O1 [58] uses MCTS with a policy network for candidate reasoning paths and a reward model evaluating logical consistency, effectively balancing exploration and exploitation in knowledge base QA. + +Conversely, PRM emphasizes detailed supervision of intermediate reasoning steps. 
LeReT [34] uses the Identity Policy Optimization (IPO) algorithm, optimizing query quality by rewarding average precision (AP) of retrieved documents, boosting retrieval recall and overall multi-hop task performance. ReARTeR [75] extends this with a step-level binary reward model, combining Monte Carlo scoring and temporal difference (TD) methods to evaluate reasoning paths proactively, reducing logical errors and redundant retrievals, and improving accuracy on benchmarks like HotpotQA. + +Moreover, influenced by DeepSeek-R1, GRPO [69] is also gradually being applied in scenarios combining RAG and Reasoning. GRPO is a variant of the Proximal Policy Optimization (PPO) reinforcement learning algorithm that abandons the critic model and instead estimates the baseline from group scores, significantly reducing training resources. For example, ReZero [16] uses GRPO to introduce a "retry" mechanism for LLMs, incentivizing LLMs to keep trying after an initial search failure by rewarding retry search queries. This mechanism simulates the human strategy of "if at first you don't succeed, try again" in information retrieval. PORAG [73], based on GRPO, directly optimizes retrieval quality, contextual relevance, and generation coherence through a dual reward mechanism (retrieval fidelity and response quality). + +Hybrid methods merge ORM and PRM to optimize both final outcomes and intermediate steps via composite rewards. SmartRAG [20] applies Proximal Policy Optimization (PPO), combining answer-level F1 rewards with penalties for excessive retrievals, balancing knowledge completeness and efficiency. RAG-Gym [96] advances this with multidimensional process rewards (sufficiency, utility, redundancy) and techniques like contrastive loss and Best-of-N sampling to promote efficient search decisions, even zero-shot. These hybrid strategies markedly lower retrieval costs while sustaining accuracy in complex tasks. 
+ +In addition, we can also observe that in current RL-based methods, academia focuses more on exploration with small-scale LLMs (<8B), among which the Qwen and Llama series are the most widely used. Overall, RL provides a flexible, scalable framework for integrating RAG and reasoning. ORM + +guides the discovery of globally optimal strategies, PRM enhances reasoning robustness via local refinements, and their combination addresses modular system limits. Future work may explore collaborative rewards in multi-agent settings, offline RL based on world models, and hierarchical reward decomposition for open-domain applications. + +# 6 Downstream Tasks and Evaluation + +While previous chapters focused on methodologies and advances in RAG combined with reasoning, this chapter shifts to tasks and evaluation. It provides a comprehensive overview and analysis of existing tasks, datasets, their current status, and emerging trends. By reviewing these resources, we highlight the landscape's gaps and limitations in current evaluation methods. The chapter also explores key challenges in assessment frameworks, identifying shortcomings and suggesting potential improvements. + +![](images/1e880ccde31a88477a8599518908e126cc979da6226fba098d13340d6687a5c6.jpg) +Figure 7. The current downstream tasks and datasets related to the combination of RAG and Reasoning show that multi-hop question answering tasks still dominate. Correspondingly, HotpotQA, 2WikiMultihopQA, and MuSiQue remain the most commonly used evaluation datasets. + +# 6.1 Knowledge-Intensive Tasks + +In the evaluation for RAG systems, knowledge-intensive question answering (QA) remains the primary focus (Figure 7). As LLMs improve in semantic understanding and reasoning, benchmarks have expanded to cover tasks from simple fact retrieval to complex multi-step reasoning. 
However, evaluation methods specifically designed for RAG lag behind due to the dual challenge of assessing both retrieval-generation coherence and adaptability to dynamic knowledge bases. For example, multi-hop QA requires integrating + +dispersed knowledge through multi-stage retrieval while verifying logical consistency between answers and retrieval paths. This complexity increases dataset construction costs compared to purely generative tasks, keeping research centered on knowledge-intensive QA subcategories such as open-domain QA, knowledge-base QA, and multi-hop QA. + +Commonly used datasets include Natural Questions (NQ) [47] for single-hop factual queries, HotpotQA, 2WikiMultiHopQA [31] and Musique [79] for multi-hop QA. These benchmarks are mostly based on Wikipedia and fail to reflect the RAG demands and corresponding complexity in real-world scenarios. Some efforts have pushed evaluation boundaries, like CRUD-RAG's [59] operational metrics and DomainRAG's [86] domain-specific evaluations, but high costs and metric-task interdependencies limit progress. As a result, knowledge-intensive QA remains central for testing RAG robustness and practicality, highlighting a critical bottleneck: the need for innovative frameworks that balance retrieval flexibility and controlled generation to support new developments like Agentic RAG. Overall, many evaluation benchmarks are lagging behind rapid RAG+Reasoning advances, especially as LLMs grow more powerful. Specifically, the current evaluation of RAG faces the following challenges. + +Limited Challenge. With improving LLM capabilities, many knowledge-based questions are no longer difficult, as they can be answered without external retrieval. Current multi-hop reasoning datasets, often built from artificial templates, offer limited challenge. There is an urgent need for more complex datasets reflecting real-world scenarios and practical use. + +Lack of Specificity. 
Existing evaluation tasks are still predominantly focused on factual assessment and knowledge retrieval, lacking evaluations that probe deeper analytical thinking. This constraint limits the ability to measure a model's capacity for profound reasoning and cognitive depth. + +Task Uniformity. The majority of benchmarks are overly dependent on QA tasks, focusing on reactive, question-and-answer-based interactions. There is a pressing need to introduce tasks aligned with real-world applications, such as active information retrieval tasks based on personal knowledge or proactive knowledge discovery. + +Insufficient Dimensions. Evaluations are primarily end-to-end, focusing solely on final outcomes. However, with the introduction of reasoning processes, RAG+Reasoning systems have become iterative, multi-step frameworks. Current evaluations are unable to assess intermediate reasoning steps or retrieval chains effectively. The absence of step-by-step supervision data limits both research and training of related methods. Furthermore, current evaluation methodologies lack comprehensive assessments of system performance + +trade-offs, such as computational cost and efficiency, which are critical for practical deployment. + +This emergent landscape necessitates the creation of a new generation of evaluation frameworks that can address these shortcomings. Such frameworks must not only ensure the adaptability of retrieval and the controllability of generation but also integrate intermediate reasoning evaluation and efficiency metrics, paving the way for the development of more robust and efficient RAG systems suited to diverse real-world applications. + +# 6.2 New Tasks on RAG+Reasoning + +Recently, combining RAG with reasoning has significantly improved models' ability to tackle more realistic and challenging tasks, raising the standards for evaluation methods. 
This subsection examines emerging tasks that assess their combined strengths; related tasks and datasets are shown in Table 2. Here, "emerging" refers not to entirely new tasks but to those with unprecedented complexity and demands. These include Deep Research tasks requiring multi-layered information integration and reasoning; PhD (Expert)-Level Complex Reasoning tasks targeting advanced scenario reasoning; and critical domain-specific decision support tasks like medical diagnosis and legal analysis. Such tasks demand not only external knowledge retrieval but also logical consistency, coherence, and depth in reasoning.

6.2.1 Deep Research. From the perspective of integrating RAG and reasoning, Deep Research tasks exemplify complex downstream applications. They require models to handle open-ended retrieval, produce long-form, structured text, and synthesize multi-source information through deep reasoning. This section analyzes their key features, evaluation datasets, and metrics.

At the core of Deep Research tasks lies the mission of addressing complex informational queries. These tasks are distinguished by several key attributes:

First, dynamic interactivity is essential. Models engage in iterative dialogue to uncover latent user needs or "unknown unknowns". For example, the Co-Storm [43] framework enables collaboration with multiple language model agents to explore information gradually, easing user cognitive load and capturing unmet needs more accurately.

Second, integrating information from multiple sources is crucial. Models must consolidate diverse data to provide comprehensive coverage. For instance, some frameworks use dynamic mind maps to structure knowledge and produce cohesive reports, ensuring accuracy and completeness.

Third, expert-level accuracy is required. Many tasks demand domain expertise, expecting models to perform like human specialists.
The Agentic Reasoning [92] framework illustrates this with high-stakes scenarios like medical treatment design or legal analysis, where outputs are judged on correctness, depth, and coherence. + +Table 2. Tasks and Datasets under the New Trend of RAG Combined with Reasoning + +
Task TypeSub-TaskDatasetDescriptionScaleConstruction ByEvaluationPaper
Deep ResearchDeep ResearchAgentic ReasoningPHD-level dataset covering finance, medicine, and law.15-30 domainsPhD ExpertsExpert pass rate[92]
Report Genera-tionWildSeek [44]Info-seeking task-goal pairs for document generation.100 samplesRules/LLM/ManualLLM[98]
Report Genera-tionTELL ME A STORY [37]fiction writing evaluation dataset: detailed prompts and long-form narratives.230 samplesManualLLM[98]
Peer ReviewReview-5k [91]ICLR 2024 peer review dataset: paper metadata and structured reviewer feedback.4,991 papersOpenReview/arXivMSE/MAE/Acc[91]
Report Genera-tionResearch-14k [91]2022-2024 Accepted ML pa-pers: outlines, full texts, and cited abstracts.14,911 papersSemantic Scholar + arXivSimulated review scores[91]
Report Genera-tionSolutionBench [54]Engineering benchmark: constrained solutions across 8 real-world domains.1,050 datapointsManual/LLM ex-tractionAnalytical/ Tech-nical scores[54]
Mathematics & ReasoningMath ReasoningGPQA [67]PHD-level MCQs in physics, chemistry, and biology.744 setsPhD ExpertsAccuracy[92]
Math ReasoningMATH500 [55]500 math problems from the MATH test set.500 problemsPublic reposPass@K[51]
ProgrammingLiveCodeBench [40]Programming benchmark with easy, medium, and hard problems.1,055 problemsCompetition plat-formsPass@K[51]
ProgrammingUSACO [70]USA Computing Olympiad problems, testing algorithms and coding.307 problemsUSA Computing OlympiadPass@K[52]
Math ReasoningTheoremQA-Math [33]BRIGHT subset: theorem-based math problems.206 problemsSTEM datasetsAccuracy[52]
ProgrammingGorilla [64]API-aware code generation from HuggingFace, Torch Hub, TensorFlow Hub docs.1,600 APIsManualAST matching[73]
Math ReasoningOlympiadBench [29]Olympiad-level math compe-tition problems.1,000 problemsCompetitionsAccuracy/F1[109]
Complex Reason-ingComplexWebQA [76]Multi-step reasoning over web queries with cross-document integration.34,689 queriesWeb snippetsAccuracy[36]
Demanding RetrievalDomain RetrievalStackEcon & Stack-Bio [33]Biology and economics StackExchange questions for complex retrieval.206 queriesStackExchangenDCG@K[52]
Active RetrievalAR-Bench [14]Active retrieval benchmark with four sub-tasks.8k/sub-taskSyntheticAccuracy[14]
Real-timeTAQA [104]QA dataset with time-evolving answers.10K-100K rowsHuman-curatedLLM[14]
Real-timeFreshQA [80]Dynamic fact QA benchmark with evolving answers.600 samplesMixed sourcesLLM[14]
Domain RetrievalPubMed [42]PICO-based medical search dataset linking reviews to PubMed.21k+ samplesSystematic re-viewsRecall@K[42]
Domain RetrievalTrial search [42]PICO-based clinical trial search linked to ClinicalTrials.gov.7k+ samplesManuallyRecall@K[42]
Domain RetrievalFinSearchBench-24 [50]Financial retrieval benchmark covering stocks, rates, policy, trends.1,500 queriesManuallyAccuracy[50]
Decision & QABusinessDQA [48]Decision QA benchmark with business scenarios in enterprise settings.301 pairsvideo gamesAccuracy[48]
MedicalCMB-Clin [87]CMB subset for clinical diagnosis reasoning in Chinese medical cases.74 casesTextbooks/diagnostic materialsLLM/Expert[11]
MedicalMM-Cases [11]Medicine cases generated by GPT-4o-mini, verified by doctors.609 casesLLM/doctor-reviewedLLM/Expert[11]
MedicalTCM-Cases [11]TCM patient cases generated by GPT-4o-mini, verified by doctors.130 casesLLM/doctor-reviewedLLM/Expert[11]
+ +Fourth, multi-modal reasoning is often necessary. Deep Research tasks involve varied data types—text, code, knowledge graphs—and dynamic tool use such as web searches or code execution to enhance reasoning. + +Finally, handling multiple real-world constraints is vital. Tasks may require generating practical solutions under specific conditions, like designing hospitals in challenging environments with factors like heavy rainfall and seismic activity, as seen in the DeepSolution framework. This ensures outputs are feasible and relevant. + +To ensure the diversity and complexity of Deep Research tasks, their evaluation relies on datasets drawn from multiple domains. A few notable examples include: + +WildSeek Dataset [44]: This dataset is constructed from real-world user information-seeking scenarios and comprises 100 data points covering 24 fields, including economics, computer science, and law. Each data point is characterized by a topic, user goal, and domain label. For example: "Domain: Economics; Topic: Development of a Shared Trading Currency; Goal: Investigate how a new shared currency could eliminate transaction costs". WildSeek effectively evaluates models' competence in dynamic interaction and multi-source information integration. + +GAIA [62]. The GAIA Benchmark, developed jointly by Meta AI, Hugging Face, and others, is a comprehensive evaluation framework designed to assess general AI assistants' ability to handle real-world problems. It features 466 carefully crafted tasks spanning language reasoning, visual perception, multi-agent collaboration, and adaptability, focusing on key skills like reasoning, multimodal processing, web browsing, and tool use. GAIA measures performance across dimensions such as task execution, adaptability, collaboration, generalization, and real-world reasoning with metrics like completion rate, response quality, efficiency, and robustness. 
Unlike traditional benchmarks, it emphasizes robustness and reliability in everyday scenarios, supports zero-shot evaluation, prevents data contamination, and is widely used in research and industry to guide AI development. + +SolutionBench [54]: This dataset spans eight engineering domains, including environmental, mining, and transportation engineering. Each instance presents a complex engineering problem with specific constraints. For example: "Design a safe and efficient hospital construction plan in a region with 3000mm annual rainfall, expansive soils, and frequent seismic activity." SolutionBench evaluates models' ability to address multi-constraint problems and integrate specialized knowledge effectively. + +The current evaluation system for DeepResearch faces the dual challenges of scarce specialized testing tasks and the difficulty of assessing complex, lengthy reports: On one hand, existing benchmark tests only cover basic capabilities and lack systematic evaluation standards in specialized scenarios like business analysis and policy assessment; on the + +other hand, the multimodal integration, logical chain verification, and domain adaptability testing of long reports pose technical bottlenecks for traditional assessment methods, necessitating the development of new evaluation tools that integrate logic graphs, dynamic scenario simulation, and domain knowledge bases. + +In the future, the evaluation system will evolve into a multidimensional framework, including the construction of a three-level indicator matrix covering basic capabilities, reasoning levels, and application value. Overcoming these evaluation bottlenecks requires both technological innovation and joint standard-building efforts. This concerns not only the reliability validation of intelligent research tools but also the reshaping of research evaluation paradigms and industrial application boundaries. + +6.2.2 PhD (Expert)-Level Complex Reasoning. 
The integration of RAG with advanced reasoning has become essential for tackling expert-level, complex cognitive tasks, particularly at the PhD level. These tasks, including competitive programming, theorem-driven proof reasoning, and cross-disciplinary knowledge retrieval, require multi-layered logical inference and precise coordination between dynamic retrieval and domain-specific knowledge. PhD-level reasoning differs from standard evaluations across three dimensions: knowledge intensity, procedural rigor, and domain specificity. Knowledge intensity demands dynamic access to deep, specialized knowledge, such as analyzing dynamic programming time complexity or applying algebraic topology theorems—needs that surpass general corpora and call for domain-specific knowledge graphs and retrieval methods. Procedural rigor involves mathematical precision in multistep proofs, requiring logical consistency in symbolic manipulation, theorem use, and counterexample refutation, as seen in international math competitions. Domain specificity reflects tailored reasoning methods, e.g., handling synchronization in concurrent programming or employing tensor calculus in quantum field theory. + +Evaluation systems for such tasks are inherently multilayered and multimodal. The USACO Benchmark [71] offers a graduated difficulty scale for programming reasoning, testing both correctness and algorithmic constraints like time complexity. TheoremQA-Math [9] links formalized math problems to theorem libraries, demanding verifiable mappings between theorem applications and calculations. Cross-disciplinary datasets like StackBio and StackEcon [53] assess models' ability to extract critical knowledge from dense, domain-rich documents, serving as strong tests for domain-oriented retrieval accuracy. + +Modern evaluation surpasses traditional end-to-end tests by combining process and outcome validation. 
Frameworks like CR-Planner [52] use dual models—a Sub-Goal Critic to score reasoning chains and an Execution Critic to evaluate retrieval—allowing fine-grained step monitoring. For + +example, in dynamic programming, key steps like formulating state transitions and retrieving boundary conditions receive targeted feedback. Similarly, Search-O1 [51] quantifies knowledge completeness by tracking uncertainty indicators (e.g., tentative language), measuring confidence and accuracy. Outcome validation maintains strict correctness benchmarks in programming and combines metrics like F1 scores with expert review in open-domain scientific QA to ensure precise understanding of domain-specific terms. + +# 6.3 Challenges and Future Directions + +6.3.1 Complex Domain Tasks. Recent advances in RAG have provided novel solutions for more complex tasks in professional domains. These downstream tasks transcend the limitations of traditional question-answering models that rely solely on simple retrieval-generation patterns, involving challenges such as real-time information acquisition, integration of domain expertise, and dynamic decision-making support. The nature of these tasks can be characterized along three interrelated dimensions: (1) temporal dynamics, emphasizing the rapid changes in data and reasoning environment; (2) domain specificity, focusing on deep integration of industry knowledge and structured data; and (3) reasoning chain complexity, reflecting requirements for multi-stage reasoning and fine-grained decomposition of queries. + +To rigorously evaluate such systems, innovative benchmarking approaches have been proposed. The FinSearchBenchmark-24 dataset, for example, encompasses five months of market data variations, integrating multi-variable interactions across stock, policy, and industrial sectors, and includes over 1,500 multiple-choice questions, thereby surpassing the constraints of traditional static benchmarks. 
The evaluation adopts a hierarchical and quantitative methodology: the foundational level measures model accuracy and response latency; the intermediate layer assesses the temporal sensitivity of information relevance and the contribution of retrieval mechanisms to reasoning outcomes; and the advanced layer employs ablation studies to highlight performance variances under dynamic temporal decay. This multifaceted evaluation not only differentiates surface-level retrieval capabilities but also rigorously measures the synergy between reasoning quality and temporal context, furnishing theoretical and practical foundations for long-term stability and predictive accuracy in complex domain systems. + +Experimental findings further reveal that establishing long-term evaluation protocols with temporal weighting functions is indispensable for adapting to realistic dynamic environments. Nonlinear declines in decision accuracy, observed when extending relevance windows from 72 to 168 hours, emphasize the importance of factoring temporal decay into assessment frameworks. Future work should extend these evaluation protocols to high-stakes domains such as medical diagnostics and legal consultation, where the standardization of interpretability metrics will critically support + +the evolution of RAG+ reasoning systems toward robust and trustworthy decision-assistance platforms. + +6.3.2 Decision Support and Active Retrieval. The expansion of RAG+Reasoning frameworks into specialized tasks has fostered two complementary research paradigms: decision optimization and active retrieval. In the decision optimization category, systems must leverage heterogeneous structured data, rule bases, and objective functions to formulate optimal strategies. Representative systems like PlanRAG formalize Decision Question Answering (Decision QA) tasks targeting enterprise-level scenarios including supply chain optimization, industrial resource allocation, and market price regulation. 
These tasks require planning multimodal reasoning paths where models iteratively retrieve data from relational and graph databases, integrate intricate business rules, and iteratively refine decision-making paths through replanning mechanisms. To evaluate such capabilities, the Decision QA (DQA) benchmark creates dual database versions (MySQL and Neo4j) derived from economic systems in strategy games, assessing cross-structured generalization. The evaluation consists of a three-tier framework: the core tier measures answer accuracy; the intermediate layer diagnoses error types to identify system bottlenecks; and the foundational tier focuses on retrieval efficiency and the impact of replanning frequency. This structured evaluation framework not only tracks performance but also offers actionable insights for system refinement. + +Conversely, the active retrieval evaluation addresses the challenge of dynamically determining when and how to invoke retrieval under complex multimodal contexts. Unlike rigid traditional RAG systems, UAR applies lightweight classifiers for fast, accurate triggers, improving performance in time-sensitive or creative tasks. Tested on AR-Bench, it combines binary trigger accuracy with GPT assessments, exact matches, and human reviews, boosting adaptability across diverse contexts. + +Emerging trends in these evaluation paradigms indicate a shift from static, rule-based frameworks to dynamic system simulations, as exemplified by DQA's use of game engine-generated datasets to simulate realistic environments. Similarly, active retrieval tasks progress from simple retrieval trigger decisions toward collaborative multi-criteria decision-making. Evaluation methodologies are concurrently evolving from singular performance metrics to multidimensional matrices comprising core effectiveness, diagnostic error distributions, and economic cost measures. 
+ +# 7 Cost and Risk + +Integrating reasoning into RAG systems is neither effortless nor purely beneficial. Recent trends have exaggerated its advantages while downplaying the costs and risks. This trade-off between performance and cost is crucial. This section examines the expenses and misuse risks linked to adding + +![](images/5456685368ffe44fb4c5b81029bd5ef81d13e2a9f1ac24b37bf34bf87ce8844d.jpg) +Figure 8. From LLM to RAG and then to RAG+Reasoning, performance improvement comes with additional cost. + +reasoning to RAG systems. As shown in Figure 8, the cost of moving from LLM to RAG, then to RAG + Reasoning, incurs an inevitable "invisible tax". Though often hidden by performance gains, this cost is vital in assessing these methods' overall practicality and efficiency. + +The shift from LLM to RAG moves from simplicity to enhanced knowledge handling by incorporating external information. A basic LLM provides direct, efficient answers with low latency and token use but is limited to pre-trained knowledge, restricting complex or up-to-date queries. RAG overcomes this by adding a vector database for external retrieval, vastly expanding response scope and reliability. However, this requires substantial data processing, storage, and introduces higher latency and token costs due to data chunking, encoding, indexing, and retrieval overhead. + +Advancing from RAG to RAG + Reasoning adds multistep reasoning capabilities, enabling complex task handling, autonomous decisions, and more context-aware responses through intricate reasoning. This comes at the expense of increased delays, token consumption, processing demands, and greater complexity in system integration and maintenance. The reasoning layer's autonomy also brings opaqueness, unpredictability, and heightened security and reliability risks. These challenges highlight the necessity of carefully balancing effectiveness against costs when adopting RAG + Reasoning in real-world applications. 
+ +# 7.1 Cost Trade-off in RAG+Reasoning + +Figure 9 illustrates typical works combining RAG and Reasoning, showing retrieval and reasoning demands alongside token consumption. While integrating dynamic knowledge retrieval with multi-step reasoning greatly improves accuracy in more complex tasks, the resulting systemic costs are often underestimated in research and practice. These costs + +grow non-linearly, causing serious efficiency bottlenecks in real-world use. The tradeoff between effectiveness and efficiency stems from RAG+Reasoning's architecture: multistage task decoupling, dynamic path planning, and intermediate state preservation. These features improve reasoning quality but trigger cascading increases in computational resources, token usage, and reduced retrieval efficiency. This section explores these implicit tradeoffs from the angles of resource use, token consumption, and retrieval efficiency. + +7.1.1 Non-Linear Growth of Computational Resources. The RAG+Reasoning framework separates retrieval and reasoning into multiple stages, causing computational demands to grow non-linearly. Dynamic chain-of-reasoning methods execute multiple LLM generations and retrievals per inference, resulting in complexity far exceeding baseline models. Fixed-length reasoning chains trigger repeated retrieval and generation calls, increasing resource needs with task complexity. More advanced techniques like MCTS-guided methods add rounds of candidate path generation and evaluation, further multiplying runtime and memory usage on GPUs compared to linear methods. Even simpler multi-step planning tasks incur much higher overhead than single-stage retrieval models due to extra graph construction and analysis. While this resource intensity improves inference accuracy, it poses serious scalability challenges under limited resources as computational costs grow superlinearly with model size, retrieval chain length, and task complexity. +7.1.2 Implicit Token Inflation. 
Multi-step reasoning frameworks inherently cause significant token inflation through iterative intermediate processes like thought chains, retrieved documents, and verification feedback. Active learning setups consolidate multiple intermediate results—retrieved documents, counterfactuals, multi-round validations—leading to + +![](images/43b9c3e6021b5e7e0d22dfa723c29a2ecc7d344929cf0de432c2f1383653b5ca.jpg) +Figure 9. Cost quadrant diagram of retrieval and reasoning requirements + +token usage well beyond typical limits. Chain-based retrieval also generates token bloat due to exhaustive candidate path exploration. Iterative reasoning path selection, expansion, and evaluation add heavy token overhead in tasks needing deep reasoning chains involving extensive sequence generation and evaluation. Token usage grows exponentially with task complexity and increases further when intermediate reasoning favors depth or breadth. This inflation raises API costs and memory demands, especially in long-text generation like Deep Research [106]. + +7.1.3 Marginal Decline in Retrieval Efficiency. Dynamic retrieval improves knowledge precision but suffers diminishing efficiency as task complexity increases. Adaptive methods reduce retrievals for simple tasks but still require multiple iterations for complex ones, adding significant overhead compared to standard RAG. The tradeoff between retrieval quality and frequency further limits efficiency. High-accuracy retrieval methods incur heavy computational and time costs, negating their efficiency benefits. Even advanced retrieval-trigger optimizations can't fully remove this overhead due to extra training and deployment costs [41]. This natural efficiency ceiling highlights ongoing challenges in balancing retrieval accuracy and resource use, especially in large, complex tasks. + +7.1.4 Toward a Cost Model Framework. 
Against this backdrop, the development of fine-grained cost models becomes a necessary precondition for balancing effectiveness and efficiency. Existing evaluation metrics, which often rely on single-task performance indicators (such as Exact Match or F1) or coarse-grained runtime statistics, lack the comprehensiveness to jointly model computational resources, token flow, and retrieval overhead. Consequently, they fail to quantify the true tradeoffs in reasoning mechanisms. For instance, while multi-hop reasoning may improve task accuracy, these improvements are frequently offset by exponential growth in token consumption and latency relative to baseline methods. A fine-grained cost model would enable researchers and practitioners to more accurately evaluate the real benefits of reasoning-centric frameworks while addressing the underexplored interplay between computational cost and task performance. + +# 7.2 Potential Risk of Over-Thinking + +In the process of developing deep thinking models, "overthinking" poses a key risk to system efficiency and reliability [10, 15, 19, 30, 74, 81], and this issue is further amplified after combining with RAG. It appears as redundant reasoning steps, excessive validation of known conclusions, or unnecessarily broad retrieval scopes, wasting computational + +resources, increasing error propagation, and degrading performance. For example, in financial risk assessment, an LLM with RAG might retrieve multiple similar market reports and repeatedly verify the same economic indicators rather than focusing on core risks, leading to delayed decisions. This stems from an imbalance between reasoning and retrieval: after accessing external knowledge, the model can enter a "self-validation loop," repeatedly parsing overlapping or contradictory documents. The generation module, seeking reliability, may trigger further retrievals, creating a feedback loop that worsens inefficiency. 
This issue is critical in real-time systems like medical diagnosis, where over-retrieval of irrelevant literature can delay urgent decisions. + +Case studies show the impact of overthinking [74]. In legal document interpretation, early reasoning errors can amplify through the retrieval-generation loop, causing retrieval along incorrect paths and yielding illogical conclusions. This error propagation is evident in systems like the Search-o1 [51], where flawed information extraction misguides subsequent reasoning. In industrial equipment manual interpretation, overextended reasoning with highly similar documents risks obscuring critical parameter differences, increasing procedural errors. These examples illustrate that overthinking not only hampers knowledge integration but also creates safety hazards in practical applications. + +To mitigate these risks, researchers propose multiple optimization frameworks. ReaRAG [49] limits reasoning chain length and incorporates self-reflection to prune invalid branches. A simple and effective way is to use a two-stage filtering process, first narrowing documents by metadata, then validating fragment relevance, reducing redundant information—for instance, retrieving only relevant legal clauses rather than entire regulatory texts. The DeepSeek R1 [26] applies reinforcement learning with distillation to penalize redundant steps, cutting repeated formula validation in math proofs by over $40\%$ . These approaches transform open-ended reasoning into controlled, goal-directed processes, using methods like attention weight analysis to measure information gain or confidence functions to evaluate reasoning paths. + +Current research balances constraints with model creativity. Knowledge graph-guided reasoning is tested in clinical trials to prioritize key medical features over exhaustive literature retrieval [11]. 
Causal reasoning models aim to break error chains; for example, in financial forecasting, causal graphs restrict reasoning to logically relevant macroeconomic links. Adaptive stopping strategies adjust reasoning depth in customer service—simple queries use preset templates, complex issues activate multi-hop reasoning. These advances reshape retrieval-augmented reasoning, with the core challenge being to develop evaluation frameworks that avoid both "cognitive stagnation" from excessive constraints and "cognitive overload" from insufficient control. + +Future progress will integrate cognitive science with computational modeling. By mimicking human "intuition-verification" + +decision-making, LLMs could switch seamlessly between rapid response and deep reasoning. In high-risk fields like industrial fault diagnosis, such hybrid models can quickly propose contingency plans after initial retrieval while verifying their validity through deeper analysis. This layered approach reduces overthinking risks and offers a safe, controllable path for applying LLMs in critical industries. + +# 8 Practical Guide + +The combination of RAG and Reasoning is not a one-size-fits-all solution; it requires careful evaluation of each scenario's unique needs. As a rapidly evolving and relatively new field, practical applications are still limited, making best practices hard to define. This chapter abstracts and summarizes the key traits of typical RAG+Reasoning application domains and offers practical guidelines for system design based on these features. It provides recommendations on leveraging RAG's strengths with Reasoning, highlighting priorities, pitfalls to avoid, and current opportunities (Figure 10). The goal is to promote wider adoption and effective use of this technology in diverse, complex real-world settings. 
+ +# 8.1 Domain characteristics + +As illustrated in the left part of Figure 10, we develop a seven-dimensional feature system based on the three core stages of RAG—query, retrieval, and generation—to systematically analyze challenges and adaptation needs across various industries. The query stage emphasizes the complexity of intent understanding and the demand for advanced reasoning, recognizing that industries differ in query abstraction and specificity; some require quickly capturing implicit, deep intentions, while others need complex reasoning. Effective preservation of original semantic meaning during understanding and reasoning is key to improving RAG performance. Retrieval focuses on the system's adaptability to diverse and dynamic knowledge sources, which vary from rich multi-domain data to rapidly updating information; frequent updates and fragmented knowledge present challenges that demand effective integration to ensure consistent support for generation. The generation stage requires high-quality outputs, with strict control over hallucinations—especially critical in sensitive fields like healthcare and law—along with varying latency requirements for real-time or delayed responses. Explainability and traceability at this stage are essential for system credibility and serve as key evaluation metrics. This comprehensive framework reveals technical bottlenecks and guides improvements, and is applied to analyze four representative domains: finance, healthcare, law, and personal assistants. + +8.1.1 Finance. In the finance domain, user queries typically focus on structured needs like investment decisions and risk forecasting. While intent understanding is moderately complex, the system must perform advanced reasoning amid + +![](images/7ac1eba9b0f22ec452c189570cac365dfa05f5cba364f27f64c05775b1c82bff.jpg) +Figure 10. 
Practical guide to synergizing RAG and Reasoning + +rapidly changing market conditions, relying heavily on external knowledge and frequent updates. For example, portfolio return forecasting integrates time series analysis, policy interpretation, and cross-market reasoning. Retrieval demands handling diverse data sources—real-time market data, annual reports, and regulatory filings—with update cycles often measured in minutes. During generation, strict latency and hallucination control are crucial, as outputs must include decision-making suggestions with full data traceability. Investment research reports, for instance, require annotated key indicators, their data sources, and computation logic to ensure transparency and regulatory compliance. High latency control and robust traceability are essential to maintain transparency and adherence to financial regulations. + +8.1.2 Healthcare. Healthcare queries involve complex medical semantic parsing, often with ambiguous terms or incomplete symptoms. For example, "persistent chest pain with shortness of breath" requires multi-hop reasoning across cardiology, pulmonology, and emergency medicine. Retrieval must integrate electronic health records, medical imaging, and up-to-date clinical guidelines. In generation, hallucination tolerance is minimal—errors in drug dosages or protocols risk malpractice. Therefore, accuracy, timeliness, and explainability are paramount, with every decision step traceable and verifiable. +8.1.3 Legal Services. Legal consultations often require interpreting statutes and citing cases, balancing precise legal terms with natural language nuances. Retrieval depends on structured, infrequently updated sources like case law databases and local regulations. Generation demands accuracy—for instance, drafting contract clauses must precisely cite specific statutes (e.g., Article 472 of the Civil Code) down + +to the paragraph level for traceability. 
Explainability is essential, with traceability usually above $95\%$ , and probabilistic language avoided to comply with strict judicial documentation standards. + +8.1.4 Personal Assistants. This domain features diverse, dynamic user needs, including schedule management, real-time navigation, and open-domain conversations. Accurate intent disambiguation through contextual awareness is crucial. Retrieval integrates fragmented sources like user behavior logs, geolocation, and social media. Generation latency varies: weather updates require sub-second responses, while travel planning can tolerate $5+$ seconds. Hallucination tolerance depends on context—creative outputs are acceptable for recipes but not for flight information, which demands full accuracy. This necessitates adaptive verification in the RAG system. Though intent complexity is lower than in healthcare or legal fields, the domain's interaction diversity requires heavy reliance on external knowledge and dynamic balancing of latency and accuracy. + +# 8.2 Do's and Don'ts + +Building on aforementioned domain characteristics, we further identify six common scenarios, and derive technical adaptation principles for each. This section outlines key optimization strategies (Do's) and prohibitions (Don'ts), to guide the co-design of RAG and reasoning. +8.2.1 Structured Reasoning Scenarios. For scenarios requiring multi-step logical decomposition and structured knowledge dependency, such as portfolio return prediction, Chain-of-Thought (CoT) task decomposition and knowledge graph (KG)-driven graph reasoning approaches should be + +employed. Complex problems should be broken into verifiable sub-tasks, such as coupling market trend analysis with policy impact assessment, while leveraging knowledge graph constraints to ensure logical completeness and auditability. 
It is essential to incorporate a temporal validation layer to cross-check the consistency of timestamp-sensitive information (e.g., real-time market data or emergent regulatory policies) within a dynamic knowledge base. Approaches that exclude retrieval-based verification of salient features must be avoided, as they may lead to reasoning biases arising from the absence of structured knowledge anchors (e.g., critical indicators from financial statements). Furthermore, the reasoning space of LLMs should be constrained within domain-specific knowledge frameworks to prevent irrelevant or invalid deductions. + +8.2.2 Dynamic Demand-Responsive Scenarios. For scenarios characterized by rapidly shifting demands and user preference variability, such as itinerary planning and multimodal interaction in personal assistant services, a dynamic adaptation mechanism based on prompt engineering is recommended. By dynamically associating fragmented knowledge units (e.g., user behavior history and real-time traffic updates) with semantic templates and employing heuristic rules for search-space pruning (e.g., prioritizing locally updated information within the past 24 hours), the system can balance contextual adaptability with response speed. Model fine-tuning or reinforcement learning (RLHF/DPO)-based strategy updates should be avoided due to their lengthy iterative cycles and computational overhead, which cannot meet real-time responsiveness requirements, such as millisecond-grade reaction times for last-minute destination changes. Lightweight caching architectures should be implemented within the retrieval system, prioritizing frequently accessed knowledge fragments, such as operating hours of popular tourist attractions, to achieve an equilibrium between dynamism and stability. + +8.2.3 Deterministic Decision-Making Scenarios. 
In scenarios requiring a single, reliable conclusion, such as clinical diagnosis generation in the healthcare domain, a multi-level deterministic assurance system should be established. Time-validation layers can filter outdated knowledge (e.g., therapies no longer approved), while field-sensitive retrieval modules trigger predefined decision rules conforming to up-to-date clinical guidelines (e.g., those codified within the latest version of the International Classification of Diseases [ICD]). Knowledge graph path constraints should restrict the reasoning process to validated causal links within medical logic (e.g., linking symptom patterns to laboratory test results within corroborated diagnostic pathways), thereby minimizing the likelihood of deviations from standard protocols. Probabilistic exploration strategies that generate alternative hypotheses (e.g., speculative differential diagnoses for atypical pneumonia) should be strictly disallowed to avoid clinical + +misjudgments. Additionally, delegating decision-making authority to external classification models must be avoided to maintain end-to-end explainability and a clear causal link in the decision-making pipeline. + +8.2.4 Time-Sensitive Scenarios. In tasks highly sensitive to response delays, such as real-time risk warnings and trading decisions in the financial sector, heuristic rules should be employed to prioritize indexing of frequently queried knowledge units (e.g., volatility indices and liquidity indicators) at the top of the search hierarchy. Directed retrieval expansion strategies that preload potentially associated information (e.g., contractual clauses of derivative instruments tied to underlying assets) can further reduce latency in multi-turn interactions. 
Monte Carlo Tree Search (MCTS) and other sample-based algorithms are ill-suited for such scenarios due to the excessive computational complexity caused by branch expansion, rendering them infeasible within tight time constraints (e.g., milliseconds). Similarly, the invocation of complex mathematical solvers (e.g., numerical solutions for stochastic differential equations) can introduce uncontrollable delays and should be replaced with lightweight rule-based mechanisms (e.g., threshold-triggering mechanisms based on historical volatility ranges). + +8.2.5 Risk-Sensitive Scenarios. For scenarios with minimal tolerance for errors, such as contract clause generation and citation of judicial interpretations in the legal sector, a dual-layer defensive mechanism must be employed. A pre-action review layer should validate the compliance of generated content with statutory standards (e.g., ensuring consistency between liability clauses and Article 577 of the Civil Code), while a reliability validation layer performs cross-referencing validation across multiple sources (e.g., aligning Supreme Court precedents with regional court guidelines) to resolve potential conflicts. Retrieval systems must include version control modules to track and update legal references (e.g., automatically flagging repealed local statutes). Unconstrained reinforcement learning-based text generation methods must be avoided, as their exploratory nature risks violating the normative requirements of legal documents (e.g., generating presumptive liability terms unsupported by judicial interpretations). All decision-making actions must pass through deterministic rule engines to filter inadmissible outputs, and the system should never execute decision actions autonomously, such as generating legally binding arbitration notices without oversight. + +8.2.6 Complex Path Exploration Scenarios. 
In exploration tasks involving multiple possible trajectories, such as differential diagnosis and therapeutic pathway optimization in medicine, weighted ranking search algorithms should balance search depth and breadth. Knowledge graph topology can guide prioritization (e.g., standard treatment procedures for acute coronary syndrome), while Monte Carlo Tree + +Search can extend exploration into uncommon differential paths (e.g., rare genetic metabolic disorders). Dynamic pruning threshold functions should be designed (e.g., adjusting the scope of differential diagnosis based on patient history) to eliminate low-confidence hypotheses in real time, thereby controlling computational scale. Brute-force searching of all potential paths (e.g., concurrently testing hundreds of pathogens for nonspecific symptoms) should be avoided to prevent exponential computational scaling. Careful handling of specific token triggers during retrieval (e.g., avoiding spurious associations between "fever" and unrelated oncological hyperthermia research) is critical to maintaining logical coherence in diagnostic reasoning. + +# 8.3 Opportunity Points + +Based on the Do's and Don'ts of current technologies analyzed in the previous section, there remain numerous directions with substantial academic value and application potential that have yet to be fully explored. This section systematically discusses several promising opportunity points across three dimensions: data and indexing, models and methodologies, and application services. + +# 8.3.1 Data and Indexing. + +Cold-Hot Tiered Indexing and Dynamic Context Management. The challenge of managing massive and highly heterogeneous data resources lies in devising an effective cold-hot tiered indexing mechanism that prioritizes data according to their frequency of use and importance. Such a mechanism not only demands classification of data based on timeliness and access frequency but also requires integration with dynamic context management. 
This allows the system to intelligently retrieve the most relevant data according to the immediate context. + +Moreover, a dynamically updated indexing mechanism can mitigate the loss of data timeliness, which often leads to deteriorated inference accuracy. By ensuring access to the most recent and task-appropriate data, this approach reduces redundancy and incorrect retrievals associated with static indexing. When combined with automated task scheduling and resource allocation strategies, fine-grained real-time inference support can be achieved, significantly enhancing the system's overall efficiency. + +Cross-Institution Knowledge Base Construction. The construction of cross-institution or cross-domain knowledge bases offers new opportunities for advancing RAG+Reasoning research. At the core of large-scale cross-institutional knowledge bases lies the optimization of data integration and sharing mechanisms. This entails addressing challenges such as data security and privacy while adopting standardized data interfaces or leveraging federated learning paradigms to enable multidimensional data integration. + +Through semantic alignment across multiple sources, entity resolution, and concept abstraction, cross-institutional knowledge can be transformed into authoritative and richly contextualized knowledge bases. These enhanced repositories provide robust contextual support for reasoning tasks and can deliver deeper insights in areas such as healthcare, finance, and urban management. + +Fine-Grained Layering and Confidence Grading. In scenarios where retrieval and reasoning operate synchronously, the interpretability and reliability of generated outcomes are paramount. Fine-grained layering of data and indices, along with confidence grading of retrieval results, enables the system to selectively use the most trustworthy and relevant subsets of data during different stages of reasoning. 
This approach fosters transparency and traceability in final decisions or generative outputs. + +For instance, in medical diagnosis scenarios, confidence grading can initiate additional verification or expert review in high-risk cases. In the legal domain, confidence layering systematically presents key evidence and identifies sources of uncertainty, reducing reasoning vulnerabilities and minimizing the risk of erroneous conclusions caused by information ambiguity. + +# 8.3.2 Models and Methodologies. + +Event-Driven Active Retrieval. Traditional retrieval mechanisms are predominantly passive. However, event-driven active retrieval presents a promising exploration avenue. By monitoring critical events, such as the injection of new data, user interactions, or changes in external sensors, event-triggered retrieval and reasoning processes can be initiated to capture and respond to potential risks and opportunities in real time. Integrating methodologies such as sequence-based event detection or multitask-learning-based intent recognition can facilitate automatic determination of when and how to trigger retrieval actions. Iteratively optimizing these processes contributes to a more efficient and continuous reasoning loop. + +Spatiotemporal-Aware Retrieval and Association. Many applications, such as natural disaster monitoring, traffic flow prediction, and inventory management in retail, exhibit strong dependencies on temporal and spatial dimensions. By incorporating spatiotemporal-aware algorithms, retrieval processes can prioritize or emphasize crucial documents according to constraints tied to time and space. This not only enhances timeliness but also improves the purposefulness and accuracy of reasoning. 
+ +Furthermore, modeling the evolution of events within spatiotemporal dimensions—when combined with semantic indexing and vector-based retrieval mechanisms in RAG—can enable more precise characterization and utilization of complex spatiotemporal dynamics during reasoning. + +Multimodal Fusion in Retrieval and Reasoning. Multimodal data (e.g., text, images, audio, video, and sensor data) collectively constitute a richer contextual environment, offering critical cues for reasoning tasks. However, existing studies are often limited to the retrieval of single or a few data modalities. Advancing research on multimodal fusion and reasoning mechanisms under the RAG+Reasoning framework has the potential to greatly enhance the system's capacity for addressing complex queries. + +The research focus lies in constructing cross-modal representation learning and alignment methods, enabling unified representations of the same entities or events across different modalities. During retrieval, confidence scores for each modality can be integrated into a comprehensive ranking process, culminating in multimodal-informed joint decision-making during reasoning. This approach not only improves contextual understanding in complex tasks but also broadens the application scope of RAG technologies in scenarios such as expert systems and autonomous driving, where sensory integration and interpretation are critical. + +Dynamic Risk Propagation Modeling and Management. The tight coupling of retrieval and reasoning with multi-stage decision-making inevitably introduces risk propagation issues. Misjudgments of high-risk or low-confidence documents during upstream retrieval are often inherited by downstream reasoning processes, amplifying uncertainties and increasing error margins. To address this, dynamic risk modeling should be embedded within retrieval workflows, enabling risk quantification, tracking, and management at multiple stages. 
When necessary, risk mitigation mechanisms or process rollbacks can be triggered, creating a closed-loop correction framework. + +Incorporating strategies for analyzing and managing risk propagation is not only a technical challenge but also a matter of system deployment and standardization. In high-stakes domains such as healthcare and financial risk management, establishing comprehensive safety standards and compliance protocols will be crucial. These protocols should treat dynamic risk propagation management as a critical component of evaluating and iterating knowledge retrieval and reasoning systems. + +# 8.3.3 Application Services. + +Validation of Logical Chain Completeness. While RAG with Reasoning can provide partially interpretable reasoning outputs, verifying the completeness of logical chains remains a challenge. Future research could integrate formal verification or symbolic reasoning techniques to ensure consistency and completeness across key reasoning nodes and intermediate conclusions. This would prevent logical gaps or illogical leaps in reasoning, offering robust regulatory support for high-stakes industries such as law and finance. + +Intervenable Generation During Reasoning. Contemporary Agentic RAG often operate as "black boxes," rendering external interventions nearly impossible during generative reasoning tasks. However, providing mechanisms for human intervention—such as through visualization or interactive interfaces—could enable experts or users to perform manual corrections, initialize prior knowledge, or modify interim assumptions during the reasoning process. This would substantially enhance the system's flexibility and safety. + +Specifically, intervenable generation allows not only post hoc error corrections but also proactive identification and rectification of potential risks or biases at earlier stages. 
Interactive interpretable reasoning platforms or visualization tools grounded in knowledge graphs could empower users to scrutinize and influence reasoning workflows, thereby enhancing confidence and control in decision-making processes across diverse domains. + +Risk Decision Interception Firewalls. In closed-loop automated tasks such as algorithmic trading or medical diagnostic decision-making, erroneous reasoning outputs can lead to catastrophic outcomes. To mitigate such risks, the system architecture should incorporate risk decision interception firewalls, which perform multidimensional validations at critical reasoning nodes or prior to outputting decisions. When confidence levels or high-risk indicators breach thresholds, these firewalls can block decision outputs or escalate them for stricter human review. + +This mechanism serves as a "final line of defense" for RAG+Reasoning systems, ensuring decision security in large-scale automated information networks. It also provides a robust foundation for compliance and regulatory auditing, enabling safer deployment in critical applications. + +Edge-Cloud Collaborative Retrieval and Reasoning. With the rapid development of IoT and 5G technologies, many scenarios demand on-site data collection and preliminary processing on edge devices, followed by high-level retrieval and reasoning tasks on cloud platforms. Efficiently partitioning tasks, allocating resources, and maintaining consistency between indexes and models across the edge-cloud continuum represent critical research directions. + +Leveraging techniques such as lightweight model compression, distributed index synchronization, and communication optimization can ensure fast reasoning while maximizing resource utilization. Edge-cloud collaborative solutions are particularly impactful for real-time industrial monitoring and smart city applications, reducing network latency and bandwidth bottlenecks while ensuring accurate and timely inference outputs. 
+ +In summary, RAG+Reasoning systems present many untapped opportunities across various dimensions. Further research and practical validation could greatly improve their use in complex, high-risk scenarios while fueling new growth in GenAI. + +# 9 Future Trends + +In this chapter, we summarize four major trends in technological advancements based on current research, aiming to elucidate and guide the potential future directions of RAG. + +# 9.1 The Integration of RAG and Graph + +Recent developments have witnessed a growing synergy between RAG systems and graph-based approaches. The intrinsic benefits of graph structures, such as explicit logical relationships and knowledge indexing, have enabled new paradigms for addressing challenges in global reasoning, dynamic data management, and personalized services within RAG systems. + +# Knowledge Organization. + +Graph-structured knowledge organization frameworks offer a powerful alternative to traditional vector-based retrieval methods, excelling in modeling complex relationships and supporting global reasoning. For example, GraphRAG [18] combines hierarchical graph indexing with community detection to extract entity relationship networks from text corpora, enabling large-scale thematic analysis through hierarchical summaries. Building on this, PIKE [82] introduces a multi-level heterogeneous knowledge graph that organizes documents, semantic segments, and refined knowledge units into a three-layer hierarchy, improving extraction accuracy and multi-hop reasoning via atomized knowledge construction and task decomposition. For dynamic personalization, EMG-RAG [89] features a three-layer Editable Memory Graph architecture that structures memory data by ontology classification, subclass, and entity relationships, using reinforcement learning to enable real-time updates and multidimensional queries. 
Together, these advances leverage graph topologies to address the limitations of conventional RAG systems—such as one-dimensional representation and weak contextual links—enabling multilevel reasoning from local fact retrieval to global thematic summarization and forming a foundation for interpretable, adaptive RAG systems. + +Symbolic Reasoning. Graph-structured symbolic reasoning methods leverage the multi-hop reasoning power of Knowledge Graphs (KG) to better manage complex semantic and logical relationships. Frameworks like HippoRAG2 and the Think-on-Graph (ToG) [60] series exemplify this. HippoRAG2 [28] builds open knowledge graphs and uses personalized PageRank with a dense-sparse coding approach inspired by brain memory, boosting performance in factual memory, semantic understanding, and multi-hop reasoning. Likewise, ToG-2 combines iterative retrieval of knowledge graphs and documents, using relationship discovery, entity pruning, and context-driven graph searches to integrate fine-grained information from unstructured text, enhancing implicit relationship detection. + +Task Planning. Graph-based task planning in RAG systems enhances complex problem-solving by overcoming the + +limitations of traditional linear workflows, which struggle with multi-step or multimodal reasoning. These approaches build dynamic knowledge graphs, like Mind Maps, to explicitly model logical dependencies and context. For instance, the Agentic Reasoning [92] transforms reasoning chains into graph structures for entity extraction, relation identification, and community clustering, enabling dynamic path tracking and optimized retrieval, excelling in tasks like doctoral-level GPQA [67]. Collaborative frameworks such as Co-STORM extend this to multi-agent scenarios, representing queries, tool calls, and knowledge integration as traversable graph nodes to support task decomposition and adaptive reasoning. + +Tool Usage and Management. 
Graph-enhanced approaches to tool management overcome limitations of traditional dependency modeling by effectively capturing complex relationships like parameter passing, functional collaboration, and resource management. Graph RAG-Tool Fusion [57] models tools as graph nodes within a dual-layer architecture of core system APIs and domain-specific tools, encoding direct and indirect dependencies as edges. It uses a two-stage retrieval process: vector-based tool retrieval followed by a graph-based depth-first search to assemble dependency-compliant toolsets. + +# 9.2 Multi-Model Collaboration + +Multi-model collaboration has emerged as a pivotal strategy for enhancing task complexity handling and domain adaptability in RAG systems [13]. By integrating the strengths of different models, this approach achieves optimized performance. For example, the CR-Planner [52] combines general-purpose generation models (e.g., GPT-4) with domain-specific critic models (e.g., Llama-3-8B). This hybrid system dynamically orchestrates subgoal planning and execution evaluation, utilizing MCTS to generate high-quality training data. Similarly, UAR [14] employs intent-aware and knowledgerequirement classifiers to dynamically trigger retrieval, decoupling lightweight classification tasks from resource-intensive decoding operations of LLMs. Furthermore, Adaptive-RAG [41] deploys small-complexity classifiers to route queries into different levels of processing strategies, balancing response speed for simple queries with deep reasoning for complex ones. These strategies form a closed "generation-evaluation"loop, leveraging complementary strengths across models to achieve improved accuracy and computational efficiency. 
+ +# 9.3 Multi-Modal Collaboration + +The breakthrough in Chain-of-Thought (CoT) capabilities of language models has catalyzed the transition of multimodal reasoning from perceptual-level integration to cognitive-level reasoning, promoting Multimodal Collaborative Reasoning as a key trend [4] By deeply integrating the logical reasoning capabilities of language models with the spatial-semantic representation of multimodal data, it significantly enhances information synthesis in complex scenarios [2]. + +For instance, in the medical domain, multimodal RAG systems such as MedCoT [56] utilize hierarchical expert systems to integrate CT imaging and pathology reports, enabling knowledge graph validation of diagnostic hypotheses and reducing misdiagnosis risks. Future research will likely focus on robust cross-modal knowledge alignment, progressive knowledge distillation, and adaptive reasoning frameworks. + +# 9.4 Customized Reinforcement Learning + +The application of reinforcement learning (RL) in RAG systems has become instrumental in improving module coordination and enhancing overall efficiency. Recent studies focus on designing reward mechanisms tailored to the specific needs of RAG systems. Frameworks such as RAG-Gym [96] and DeepRAG [24] model reasoning processes using Markov Decision Processes and introduce fine-grained process supervision mechanisms. Additionally, ReARTeR [49] and SmartRAG [20] incorporate trust-aware reward strategies and end-to-end policy optimization to achieve superior accuracy and robustness. Opportunities remain for further exploring automated reward modeling with LLMs to facilitate fine-grained supervision. 
+ +# 10 Conclusion + +This paper has systematically reviewed the synergistic integration of Retrieval-Augmented Generation (RAG) and reasoning, providing a formal definition of reasoning within the RAG framework as a structured, multi-step, goal-driven process that dynamically combines parametric and retrieved knowledge to address complex problems. + +We presented a comprehensive taxonomy covering the purposes, collaboration paradigms, and implementation methods underlying RAG+Reasoning systems. The synergy enables more precise retrieval informed by logical analysis and enhances reasoning with contextually relevant, up-to-date knowledge beyond parametric limitations. + +While the enhanced reasoning capabilities allow tackling complex knowledge-intensive tasks such as deep research, expert-level problem solving, and domain-specific decision support, practical challenges remain. These include computational and token costs that grow non-linearly, risks of overthinking leading to inefficiency and error propagation, and the lack of evaluation frameworks that effectively assess intermediate reasoning quality alongside final results. + +To bridge the gap from theory to real-world application, we proposed practical design guidelines tailored to diverse domains like finance, healthcare, law, and personal assistants, emphasizing adaptability to heterogeneous, dynamic knowledge sources and strict requirements for output reliability and traceability. + +Finally, we identified promising directions for future research, including graph-structured knowledge integration, + +multimodal and multi-model collaborative reasoning architectures, and advanced reinforcement learning techniques for optimizing retrieval-reasoning workflows. 
+ +Overall, this work establishes both a theoretical foundation and practical roadmap to drive the development of next-generation RAG+Reasoning systems capable of robust, transparent, and efficient cognition, paving the way for impactful applications across academia and industry. + +# References + +[1] Abdelrahman Abdallah, Bhawna Piryani, Jamshid Mozafari, Mohammed Ali, and Adam Jatowt. 2025. Rankify: A comprehensive python toolkit for retrieval, re-ranking, and retrieval-augmented generation. arXiv preprint arXiv:2502.02464 (2025). +[2] Mohammad Mahdi Abootorabi, Amirhosein Zobeiri, Mahdi Dehghani, Mohammadali Mohammadkhani, Bardia Mohammadi, Omid Ghahroodi, Mahdieh Soleymani Baghshah, and Ehsaneddin Asgari. 2025. Ask in Any Modality: A Comprehensive Survey on Multimodal Retrieval-Augmented Generation. arXiv preprint arXiv:2502.08826 (2025). +[3] Akari Asai, Zeqiu Wu, Yizhong Wang, Avirup Sil, and Hannaneh Hajishirzi. 2023. Self-rag: Learning to retrieve, generate, and critique through self-reflection. In The Twelfth International Conference on Learning Representations. +[4] Jing Bi, Susan Liang, Xiaofei Zhou, Pinxin Liu, Junjia Guo, Yunlong Tang, Luchuan Song, Chao Huang, Guangyu Sun, Jinxi He, et al. 2025. Why Reasoning Matters? A Survey of Advancements in Multimodal Reasoning (v1). arXiv preprint arXiv:2504.03151 (2025). +[5] Yuxi Bi, Yunfan Gao, and Haofen Wang. 2025. StePO-Rec: Towards Personalized Outfit Styling Assistant via Knowledge-Guided Multi-Step Reasoning. arXiv preprint arXiv:2504.09915 (2025). +[6] Mingyang Chen, Tianpeng Li, Haoze Sun, Yijie Zhou, Chenzheng Zhu, Fan Yang, Zenan Zhou, Weipeng Chen, Haofen Wang, Jeff Z Pan, et al. 2025. Learning to Reason with Search for LLMs via Reinforcement Learning. arXiv preprint arXiv:2503.19470 (2025). +[7] Peter Baile Chen, Yi Zhang, Michael Cafarella, and Dan Roth. 2025. Can we Retrieve Everything All at Once? ARM: An Alignment-Oriented LLM-based Retrieval Method. arXiv preprint arXiv:2501.18539 (2025). 
+[8] Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiannan Guan, Peng Wang, Mengkang Hu, Yuhang Zhou, Te Gao, and Wangxiang Che. 2025. Towards reasoning era: A survey of long chain-of-thought for reasoning large language models. arXiv preprint arXiv:2503.09567 (2025). +[9] Wenhu Chen, Ming Yin, Max Ku, Pan Lu, Yixin Wan, Xueguang Ma, Jianyu Xu, Xinyi Wang, and Tony Xia. 2023. Theoremqa: A theorem-driven question answering dataset. arXiv preprint arXiv:2305.12524 (2023). +[10] Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. 2024. Do not think that much for $2 + 3 = ?$ on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187 (2024). +[11] Yixiang Chen, Penglei Sun, Xiang Li, and Xiaowen Chu. 2025. MRD-RAG: Enhancing Medical Diagnosis with Multi-Round Retrieval-Augmented Generation. arXiv preprint arXiv:2504.07724 (2025). +[12] Yiqun Chen, Lingyong Yan, Weiwei Sun, Xinyu Ma, Yi Zhang, Shuaiqiang Wang, Dawei Yin, Yiming Yang, and Jiaxin Mao. 2025. Improving Retrieval-Augmented Generation through Multi-Agent Reinforcement Learning. arXiv preprint arXiv:2501.15228 (2025). +[13] Zhijun Chen, Jingzheng Li, Pengpeng Chen, Zhuoran Li, Kai Sun, Yuankai Luo, Qianren Mao, Dingqi Yang, Hailong Sun, and Philip S Yu. 2025. Harnessing Multiple Large Language Models: A Survey on + +LLM Ensemble. arXiv preprint arXiv:2502.18036 (2025). +[14] Qinyuan Cheng, Xiaonan Li, Shimin Li, Qin Zhu, Zhangyue Yin, Yunfan Shao, Linyang Li, Tianxiang Sun, Hang Yan, and Xipeng Qiu. 2024. Unified active retrieval for retrieval augmented generation. arXiv preprint arXiv:2406.12534 (2024). +[15] Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, et al. 2025. The Danger of Overthinking: Examining the Reasoning-Action Dilemma in Agentic Tasks. arXiv preprint arXiv:2502.08235 (2025). +[16] Alan Dao and Thinh Le. 2025. 
ReZero: Enhancing LLM search ability by trying one-more-time. arXiv:2504.11001 [cs.CL] https://arxiv.org/abs/2504.11001 +[17] Tim Dettmers, Artidoro Pagnoni, Ari Holtzman, and Luke Zettlemoyer. 2023. Qlora: Efficient finetuning of quantized llms. Advances in neural information processing systems 36 (2023), 10088-10115. +[18] Darren Edge, Ha Trinh, Newman Cheng, Joshua Bradley, Alex Chao, Apurva Mody, Steven Truitt, Dasha Metropolitansky, Robert Oazuwa Ness, and Jonathan Larson. 2024. From local to global: A graph rag approach to query-focused summarization. arXiv preprint arXiv:2404.16130 (2024). +[19] Chenrui Fan, Ming Li, Lichao Sun, and Tianyi Zhou. 2025. Missing Premise exacerbates Overthinking: Are Reasoning Models losing Critical Thinking Skill? arXiv preprint arXiv:2504.06514 (2025). +[20] Jingsheng Gao, Linxu Li, Weiyuan Li, Yuzhuo Fu, and Bin Dai. 2024. SmartRAG: Jointly Learn RAG-Related Tasks From the Environment Feedback. arXiv preprint arXiv:2410.18141 (2024). +[21] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, and Haofen Wang. 2023. Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997 (2023). +[22] Yunfan Gao, Yun Xiong, Meng Wang, and Haofen Wang. 2024. Modular rag: Transforming rag systems into lego-like reconfigurable frameworks. arXiv preprint arXiv:2407.21059 (2024). +[23] Zengyi Gao, Yukun Cao, Hairu Wang, Ao Ke, Yuan Feng, Xike Xie, and S Kevin Zhou. 2025. FRAG: A Flexible Modular Framework for Retrieval-Augmented Generation based on Knowledge Graphs. arXiv preprint arXiv:2501.09957 (2025). +[24] Xinyan Guan, Jiali Zeng, Fandong Meng, Chunlei Xin, Yaojie Lu, Hongyu Lin, Xianpei Han, Le Sun, and Jie Zhou. 2025. DeepRAG: Thinking to Retrieve Step by Step for Large Language Models. arXiv preprint arXiv:2502.01142 (2025). +[25] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. 
Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025). +[26] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025). +[27] Zirui Guo, Lianghao Xia, Yanhua Yu, Tu Ao, and Chao Huang. 2024. Lighthrag: Simple and fast retrieval-augmented generation. (2024). +[28] Bernal Jiménez Gutiérrez, Yiheng Shu, Weijian Qi, Sizhe Zhou, and Yu Su. 2025. From RAG to Memory: Non-Parametric Continual Learning for Large Language Models. arXiv preprint arXiv:2502.14802 (2025). +[29] Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, et al. 2024. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems. arXiv preprint arXiv:2402.14008 (2024). +[30] Yancheng He, Shilong Li, Jiaheng Liu, Weixun Wang, Xingyuan Bu, Ge Zhang, Zhongyuan Peng, Zhaoxiang Zhang, Zhicheng Zheng, Wenbo Su, et al. 2025. Can Large Language Models Detect Errors in Long Chain-of-Thought Reasoning? arXiv preprint arXiv:2502.19361 + +(2025). +[31] Xanh Ho, Anh-Khoa Duong Nguyen, Saku Sugawara, and Akiko Aizawa. 2020. Constructing a multi-hop qa dataset for comprehensive evaluation of reasoning steps. arXiv preprint arXiv:2011.01060 (2020). +[32] Yubin Hong, Chaofan Li, Jingyi Zhang, and Yingxia Shao. 2025. FG-RAG: Enhancing Query-Focused Summarization with Context-Aware Fine-Grained Graph RAG. arXiv preprint arXiv:2504.07103 (2025). +[33] SU Hongjin, Howard Yen, Mengzhou Xia, Weijia Shi, Niklas Muennighoff, Han-yu Wang, Liu Haisu, Quan Shi, Zachary S Siegel, Michael Tang, et al. 2024. BRIGHT: A Realistic and Challenging Benchmark for Reasoning-Intensive Retrieval. 
In The Thirteenth International Conference on Learning Representations. +[34] Sheryl Hsu, Omar Khattab, Chelsea Finn, and Archit Sharma. 2024. Grounding by trying: Llms with reinforcement learning-enhanced retrieval. arXiv preprint arXiv:2410.23214 (2024). +[35] Jian Hu. 2025. REINFORCE++: A Simple and Efficient Approach for Aligning Large Language Models. arXiv preprint arXiv:2501.03262 (2025). +[36] Yunhai Hu, Yilun Zhao, Chen Zhao, and Arman Cohan. 2025. MCTS-RAG: Enhancing Retrieval-Augmented Generation with Monte Carlo Tree Search. arXiv preprint arXiv:2503.20757 (2025). +[37] Fantine Huot, Reinald Kim Amplayo, Jennimaria Palomaki, Alice Shoshana Jakobovits, Elizabeth Clark, and Mirella Lapata. 2024. Agents' Room: Narrative Generation through Multi-step Collaboration. arXiv preprint arXiv:2410.02603 (2024). +[38] Shayekh Bin Islam, Md Asib Rahman, KSM Hossain, Enamul Hoque, Shafiq Joty, and Md Rizwan Parvez. 2024. Open-rag: Enhanced retrieval-augmented reasoning with open-source large language models. arXiv preprint arXiv:2410.01782 (2024). +[39] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. 2024. Openai o1 system card. arXiv preprint arXiv:2412.16720 (2024). +[40] Naman Jain, King Han, Alex Gu, Wen-Ding Li, Fanjia Yan, Tianjun Zhang, Sida Wang, Armando Solar-Lezama, Koushik Sen, and Ion Stoica. 2024. Livecodebench: Holistic and contamination free evaluation of large language models for code. arXiv preprint arXiv:2403.07974 (2024). +[41] Soyeong Jeong, Jinheon Baek, Sukmin Cho, Sung Ju Hwang, and Jong C Park. 2024. Adaptive-rag: Learning to adapt retrieval-augmented large language models through question complexity arXiv preprint arXiv:2403.14403 (2024). +[42] Pengcheng Jiang. 2025. DeepRetrieval: Powerful Query Generation for Information Retrieval with Reinforcement Learning. arXiv preprint arXiv:2503.00223 (2025). 
+[43] Yucheng Jiang, Yijia Shao, Dekun Ma, Sina J Semnani, and Monica S Lam. 2024. Into the unknown unknowns: Engaged human learning through participation in language model agent conversations. arXiv preprint arXiv:2408.15232 (2024). +[44] Yucheng Jiang, Yijia Shao, Dekun Ma, Sina J Semnani, and Monica S Lam. 2024. Into the unknown unknowns: Engaged human learning through participation in language model agent conversations. arXiv preprint arXiv:2408.15232 (2024). +[45] Zhengbao Jiang, Frank F Xu, Luyu Gao, Zhiqing Sun, Qian Liu, Jane Dwivedi-Yu, Yiming Yang, Jamie Callan, and Graham Neubig. 2023 Active retrieval augmented generation. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing 7969-7992. +[46] Ashutosh Joshi, Sheikh Muhammad Sarwar, Samarth Varshney, Sreyashi Nag, Shrivats Agrawal, and Juhi Naik. 2024. REAPER: Reasoning based retrieval planning for complex RAG systems. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 4621-4628. + +[47] Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, et al. 2019. Natural questions: a benchmark for question answering research. Transactions of the Association for Computational Linguistics 7 (2019), 453-466. +[48] Myeonghwa Lee, Seonho An, and Min-Soo Kim. 2024. PlanRAG: A plan-then-retrieval augmented generation for generative large language models as decision makers. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers). 6537–6555. +[49] Zhicheng Lee, Shulin Cao, Jinxin Liu, Jiajie Zhang, Weichuan Liu, Xiaoyin Che, Lei Hou, and Juanzi Li. 2025. ReaRAG: Knowledge-guided Reasoning Enhances Factuality of Large Reasoning Models with Iterative Retrieval Augmented Generation. arXiv preprint arXiv:2503.21729 (2025). 
+[50] Jinzheng Li, Jingshu Zhang, Hongguang Li, and Yiqing Shen. 2024. An Agent Framework for Real-Time Financial Information Searching with Large Language Models. arXiv preprint arXiv:2502.15684 (2024). +[51] Xiaoxi Li, Guanting Dong, Jiajie Jin, Yuyao Zhang, Yujia Zhou, Yutao Zhu, Peitian Zhang, and Zhicheng Dou. 2025. Search-01: Agentic search-enhanced large reasoning models. arXiv preprint arXiv:2501.05366 (2025). +[52] Xingxuan Li, Weiwen Xu, Ruochen Zhao, Fangkai Jiao, Shafiq Joty, and Lidong Bing. 2024. Can We Further Elicit Reasoning in LLMs? Critic-Guided Planning with Retrieval-Augmentation for Solving Challenging Tasks. arXiv preprint arXiv:2410.01428 (2024). +[53] Xingxuan Li, Weiwen Xu, Ruochen Zhao, Fangkai Jiao, Shafiq Joty, and Lidong Bing. 2024. Can We Further Elicit Reasoning in LLMs? Critic-Guided Planning with Retrieval-Augmentation for Solving Challenging Tasks. arXiv preprint arXiv:2410.01428 (2024). +[54] Zhuoqun Li, Haiyang Yu, Xuanang Chen, Hongyu Lin, Yaojie Lu, Fei Huang, Xianpei Han, Yongbin Li, and Le Sun. 2025. Deepsolution: Boosting complex engineering solution design via tree-based exploration and bi-point thinking. arXiv preprint arXiv:2502.20730 (2025). +[55] Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. 2023. Let's verify step by step. In *The Twelfth International Conference on Learning Representations*. +[56] Jiaxiang Liu, Yuan Wang, Jiawei Du, Joey Tianyi Zhou, and Zuozhu Liu. 2024. Medcot: Medical chain of thought via hierarchical expert. arXiv preprint arXiv:2412.13736 (2024). +[57] Elias Lumer, Pradeep Honaganahalli Basavaraju, Myles Mason, James A Burke, and Vamse Kumar Subbiah. 2025. Graph RAG-Tool Fusion. arXiv preprint arXiv:2502.07223 (2025). +[58] Haoran Luo, Yikai Guo, Qika Lin, Xiaobao Wu, Xinyu Mu, Wenhao Liu, Meina Song, Yifan Zhu, Luu Anh Tuan, et al. 2025. 
KBQA-o1: Agentic Knowledge Base Question Answering with Monte Carlo Tree Search. arXiv preprint arXiv:2501.18922 (2025). +[59] Yuanjie Lyu, Zhiyu Li, Simin Niu, Feiyu Xiong, Bo Tang, Wenjin Wang, Hao Wu, Huanyong Liu, Tong Xu, and Enhong Chen. 2025. Crud-rag: A comprehensive chinese benchmark for retrieval-augmented generation of large language models. ACM Transactions on Information Systems 43, 2 (2025), 1-32. +[60] Shengjie Ma, Chengjin Xu, Xuhui Jiang, Muzhi Li, Huaren Qu, Cehao Yang, Jiaxin Mao, and Jian Guo. 2024. Think-on-Graph 2.0: Deep and Faithful Large Language Model Reasoning with Knowledge-guided Retrieval Augmented Generation. arXiv preprint arXiv:2407.10805 (2024). +[61] Xinbei Ma, Yeyun Gong, Pengcheng He, Hai Zhao, and Nan Duan. 2023. Query rewriting in retrieval-augmented large language models. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. 5303-5315. + +[62] Grégoire Mialon, Clémentine Fourrier, Thomas Wolf, Yann LeCun, and Thomas Scialom. 2023. Gaia: a benchmark for general ai assistants. In The Twelfth International Conference on Learning Representations. +[63] Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. 2025. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393 (2025). +[64] Shishir G Patil, Tianjun Zhang, Xin Wang, and Joseph E Gonzalez. 2024. Gorilla: Large language model connected with massive apis. Advances in Neural Information Processing Systems 37 (2024), 126544-126565. +[65] Fabio Petroni, Aleksandra Piktus, Angela Fan, Patrick Lewis, Majid Yazdani, Nicola De Cao, James Thorne, Yacine Jernite, Vladimir Karpukhin, Jean Maillard, et al. 2020. KILT: a benchmark for knowledge intensive language tasks. arXiv preprint arXiv:2009.02252 (2020). +[66] Pouya Pezeshkpour and Estevam Hruschka. 2025. Insight-RAG: Enhancing LLMs with Insight-Driven Augmentation. 
arXiv preprint arXiv:2504.00187 (2025). +[67] David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R Bowman. 2024. Gpqa: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling. +[68] Zhihong Shao, Yeyun Gong, Yelong Shen, Minlie Huang, Nan Duan, and Weizhu Chen. 2023. Enhancing retrieval-augmented large language models with iterative retrieval-generation synergy. arXiv preprint arXiv:2305.15294 (2023). +[69] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300 (2024). +[70] Quan Shi, Michael Tang, Karthik Narasimhan, and Shunyu Yao. 2024. Can Language Models Solve Olympiad Programming? arXiv preprint arXiv:2404.10952 (2024). +[71] Quan Shi, Michael Tang, Karthik Narasimhan, and Shunyu Yao. 2024. Can Language Models Solve Olympiad Programming? arXiv preprint arXiv:2404.10952 (2024). +[72] Huatong Song, Jinhao Jiang, Yingqian Min, Jie Chen, Zhipeng Chen, Wayne Xin Zhao, Lei Fang, and Ji-Rong Wen. 2025. R1-Searcher: Incentivizing the Search Capability in LLMs via Reinforcement Learning. arXiv preprint arXiv:2503.05592 (2025). +[73] Sakhinana Sagar Srinivas and Venkataramana Runkana. 2025. Scaling Test-Time Inference with Policy-Optimized, Dynamic Retrieval-Augmented Generation via KV Caching and Decoding. arXiv preprint arXiv:2504.01281 (2025). +[74] Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Hanjie Chen, Xia Hu, et al. 2025. Stop overthinking: A survey on efficient reasoning for large language models. arXiv preprint arXiv:2503.16419 (2025). +[75] Zhongxiang Sun, Qipeng Wang, Weijie Yu, Xiaoxue Zang, Kai Zheng, Jun Xu, Xiao Zhang, Song Yang, and Han Li. 2025. 
ReARTeR: Retrieval-Augmented Reasoning with Trustworthy Process Rewarding. arXiv preprint arXiv:2501.07861 (2025). +[76] Alon Talmor and Jonathan Berant. 2018. The web as a knowledge-base for answering complex questions. arXiv preprint arXiv:1803.06643 (2018). +[77] Hieu Tran, Zonghai Yao, Junda Wang, Yifan Zhang, Zhichao Yang, and Hong Yu. 2024. RARE: Retrieval-Augmented Reasoning Enhancement for Large Language Models. arXiv preprint arXiv:2412.02830 (2024). +[78] Harsh Trivedi, Niranjan Balasubramanian, Tushar Khot, and Ashish Sabharwal. 2022. Interleaving retrieval with chain-of-thought reasoning for knowledge-intensive multi-step questions. arXiv preprint arXiv:2212.10509 (2022). + +[79] Harsh Trivedi, Niranjan Balasubramanian, Tushar Khot, and Ashish Sabharwal. 2022. MuSiQue: Multihop Questions via Single-hop Question Composition. Transactions of the Association for Computational Linguistics 10 (2022), 539-554. +[80] Tu Vu, Mohit Iyyer, Xuezhi Wang, Noah Constant, Jerry Wei, Jason Wei, Chris Tar, Yun-Hsuan Sung, Denny Zhou, Quoc Le, et al. 2023. Freshllms: Refreshing large language models with search engine augmentation. arXiv preprint arXiv:2310.03214 (2023). +[81] Ante Wang, Linfeng Song, Ye Tian, Dian Yu, Haitao Mi, Xiangyu Duan, Zhaopeng Tu, Jinsong Su, and Dong Yu. 2025. Don't Get Lost in the Trees: Streamlining LLM Reasoning by Overcoming Tree Search Exploration Pitfalls. arXiv preprint arXiv:2502.11183 (2025). +[82] Jinyu Wang, Jingjing Fu, Rui Wang, Lei Song, and Jiang Bian. 2025. PIKE-RAG: sPecialized Knowledge and Rationale Augmented Generation. arXiv preprint arXiv:2501.11551 (2025). +[83] Liang Wang, Haonan Chen, Nan Yang, Xiaolong Huang, Zhicheng Dou, and Furu Wei. 2025. Chain-of-Retrieval Augmented Generation. arXiv preprint arXiv:2501.14342 (2025). +[84] Ruobing Wang, Daren Zha, Shi Yu, Qingfei Zhao, Yuxuan Chen, Yixuan Wang, Shuo Wang, Yukun Yan, Zhenghao Liu, Xu Han, et al. 2024. 
Retriever-and-Memory: Towards Adaptive Note-Enhanced Retrieval-Augmented Generation. arXiv preprint arXiv:2410.08821 (2024). +[85] Siqi Wang, Chao Liang, Yunfan Gao, Yang Liu, Jing Li, and Haofen Wang. 2024. Decoding Urban Industrial Complexity: Enhancing Knowledge-Driven Insights via IndustryScopeGPT. In Proceedings of the 32nd ACM International Conference on Multimedia. 4757-4765. +[86] Shuting Wang, Jiongnan Liu, Shiren Song, Jiehan Cheng, Yuqi Fu, Peidong Guo, Kun Fang, Yutao Zhu, and Zhicheng Dou. 2024. Domainrag: A chinese benchmark for evaluating domain-specific retrieval-augmented generation. arXiv preprint arXiv:2406.05654 (2024). +[87] Xidong Wang, Guiming Hardy Chen, Dingjie Song, Zhiyi Zhang, Zhihong Chen, Qingying Xiao, Feng Jiang, Jianquan Li, Xiang Wan, Benyou Wang, et al. 2023. Cmb: A comprehensive medical benchmark in chinese. arXiv preprint arXiv:2308.08833 (2023). +[88] Xiaohua Wang, Zhenghua Wang, Xuan Gao, Feiran Zhang, Yixin Wu, Zhibo Xu, Tianyuan Shi, Zhengyuan Wang, Shizheng Li, Qi Qian, et al. 2024. Searching for best practices in retrieval-augmented generation. arXiv preprint arXiv:2407.01219 (2024). +[89] Zheng Wang, Zhongyang Li, Zeren Jiang, Dandan Tu, and Wei Shi. 2024. Crafting Personalized Agents through Retrieval-Augmented Generation on Editable Memory Graphs. arXiv preprint arXiv:2409.19401 (2024). +[90] Zhengren Wang, Jiayang Yu, Dongsheng Ma, Zhe Chen, Yu Wang, Zhiyu Li, Feiyu Xiong, Yanfeng Wang, Linpeng Tang, Wentao Zhang, et al. 2025. RARE: Retrieval-Augmented Reasoning Modeling. arXiv preprint arXiv:2503.23513 (2025). +[91] Yixuan Weng, Minjun Zhu, Guangsheng Bao, Hongbo Zhang, Jindong Wang, Yue Zhang, and Linyi Yang. 2024. Cyclereresearcher: Improving automated research via automated review. arXiv preprint arXiv:2411.00816 (2024). +[92] Junde Wu, Jiayuan Zhu, and Yuyuan Liu. 2025. Agentic Reasoning: Reasoning LLMs with Tools for the Deep Research. arXiv preprint arXiv:2502.04644 (2025). 
+[93] Wenjie Wu, Yongcheng Jing, Yingjie Wang, Wenbin Hu, and Dacheng Tao. 2025. Graph-augmented reasoning: Evolving step-by-step knowledge graph retrieval for llm reasoning. arXiv preprint arXiv:2503.01642 (2025). +[94] Zekun Xi, Wenbiao Yin, Jizhan Fang, Jialong Wu, Runnan Fang, Ningyu Zhang, Jiang Yong, Pengjun Xie, Fei Huang, and Huajun Chen. 2025. OmniThink: Expanding Knowledge Boundaries in Machine Writing through Thinking. arXiv preprint arXiv:2501.09751 (2025). + +[95] Liang Xiao, Wen Dai, Shuai Chen, Bin Qin, Chongyang Shi, Haopeng Jing, and Tianyu Guo. 2025. Retrieval-Augmented Generation by Evidence Retroactivity in LLMs. arXiv preprint arXiv:2501.05475 (2025). +[96] Guangzhi Xiong, Qiao Jin, Xiao Wang, Yin Fang, Haolin Liu, Yifan Yang, Fangyuan Chen, Zhixing Song, Dengyu Wang, Minjia Zhang, et al. 2025. Rag-gym: Optimizing reasoning and search agents with process supervision. arXiv preprint arXiv:2502.13957 (2025). +[97] Guanming Xiong, Haochen Li, and Wen Zhao. 2025. MCTS-KBQA: Monte Carlo Tree Search for Knowledge Base Question Answering. arXiv preprint arXiv:2502.13428 (2025). +[98] Ruibin Xiong, Yimeng Chen, Dmitrii Khizbullin, and Jürgen Schmidhuber. 2025. Beyond Outlining: Heterogeneous Recursive Planning for Adaptive Long-form Writing with Language Models. arXiv preprint arXiv:2503.08275 (2025). +[99] Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, et al. 2025. Towards Large Reasoning Models: A Survey of Reinforced Reasoning with Large Language Models. arXiv preprint arXiv:2501.09686 (2025). +[100] Zhipeng Xu, Zhenghao Liu, Yukun Yan, Shuo Wang, Shi Yu, Zheni Zeng, Chaojun Xiao, Zhiyuan Liu, Ge Yu, and Chenyan Xiong. 2024. ActiveRAG: Autonomous Knowledge Assimilation and Accommodation through Retrieval-Augmented Agents. arXiv preprint arXiv:2402.13547 (2024). +[101] Ruiran Yan, Zheng Liu, and Defu Lian. 2025. O1 embedder: Let retrievers think before action. 
arXiv preprint arXiv:2502.07555 (2025). +[102] Xiaoming Zhang, Ming Wang, Xiaocui Yang, Daling Wang, Shi Feng, and Yifei Zhang. 2024. Hierarchical Retrieval-Augmented Generation Model with Rethink for Multi-hop Question Answering. arXiv preprint arXiv:2408.11875 (2024). +[103] Zhuocheng Zhang, Yang Feng, and Min Zhang. 2025. LevelRAG: Enhancing Retrieval-Augmented Generation with Multi-hop Logic Planning over Rewriting Augmented Searchers. arXiv preprint arXiv:2502.18139 (2025). +[104] Bowen Zhao, Zander Brumbaugh, Yizhong Wang, Hannaneh Hajishirzi, and Noah A Smith. 2024. Set the clock: Temporal alignment of pretrained language models. arXiv preprint arXiv:2402.16797 (2024). +[105] Xuejiao Zhao, Siyan Liu, Su-Yin Yang, and Chunyan Miao. 2025. MedRAG: Enhancing Retrieval-augmented Generation with Knowledge Graph-Elicited Reasoning for Healthcare Copilot. arXiv preprint arXiv:2502.04413 (2025). +[106] Yuxiang Zheng, Dayuan Fu, Xiangkun Hu, Xiaojie Cai, Lyumanshan Ye, Pengrui Lu, and Pengfei Liu. 2025. DeepResearcher: Scaling Deep Research via Reinforcement Learning in Real-world Environments. arXiv preprint arXiv:2504.03160 (2025). +[107] Yijie Zhong, Feifan Wu, Mengying Guo, Xiaolian Zhang, Meng Wang, and Haofen Wang. 2025. Meta-PKE: Memory-Enhanced Task-Adaptive Personal Knowledge Extraction in Daily Life. Information Processing & Management 62, 4 (2025), 104097. +[108] Yujia Zhou, Zheng Liu, Jiajie Jin, Jian-Yun Nie, and Zhicheng Dou. 2024. Metacognitive retrieval-augmented large language models. In Proceedings of the ACM Web Conference 2024. 1453-1463. +[109] Jiachen Zhu, Congmin Zheng, Jianghao Lin, Kounianhua Du, Ying Wen, Yong Yu, Jun Wang, and Weinan Zhang. 2025. Retrieval-Augmented Process Reward Model for Generalizable Mathematical Reasoning. arXiv preprint arXiv:2502.14361 (2025). +[110] Rongzhi Zhu, Xiangyu Liu, Zequn Sun, Yiwei Wang, and Wei Hu. 2025. Mitigating Lost-in-Retrieval Problems in Retrieval Augmented Multi-Hop Question Answering. 
arXiv preprint arXiv:2502.14245 (2025). + +# Appendix + +# Agentic RAG Symbol Reference System + +The following table presents a complete symbol reference system with formally defined mathematical notations for all core concepts. + +# Symbol Design Hierarchy + +- Base states/actions: Standard font $(S_{t},a_{t})$ +- Sets/spaces: Calligraphic font $(\mathcal{A},\mathcal{K}_t)$ +- Core mechanism functions: Uppercase Greek $(\Psi, \Gamma)$ +- Operational functions: Calligraphic font $(\mathcal{R},\mathcal{T}_a)$ + +- Auxiliary functions: Lowercase Greek $(\delta, \phi)$ or blackboard bold (I) + +# Annotation Guidelines + +- Symbol disambiguation: + +- $\mathcal{R}$ strictly denotes retrieval function (vs. reward $R$ ) +- $\delta$ exclusively represents state transitions (vs. branch selector $\psi$ ) + +- Dynamic extensions: + +- Action space $\mathcal{A}$ and knowledge base $\mathcal{K}_t$ support incremental updates: $\mathcal{K}_{t + 1} = \mathcal{K}_t\oplus \mathrm{Retrieve}(q_t)$ + +Table 3. Basic states and system components + +
SymbolTypeDefinition & Description
St=(Ht,Ct)Composite stateComplete system state at timestep t, containing historical information and context vectors
HtVector/SetHistorical information aggregation
CtVectorContextual embedding vectors
qtVectorVector representation of current query at step t
KtSetDynamic knowledge base ( Initialized as K0=∅)
+ +Table 4. Action space and policy definitions + +
SymbolTypeDefinition & Description
ASetAction space, e.g., A = {Retrieve, Generate, Verify, Terminate}
atScalarSelected action at timestep t (at ∈ A)
π(St; Θ)FunctionPolicy function with parameters Θ, mapping states to action probability distributions (π: S → Δ(A))
+ +Table 5. State transition mechanisms + +
SymbolTypeDefinition & Description
δFunctionState transition function, update rule St+1 = δ(St, ·)
TaFunctionLow-level state transition operation for action a (e.g., TRetrieve denotes retrieval)
RFunctionRetrieval function, R(St) returns retrieval results
OperatorFunction composition operator (e.g., f∘g(x) = f(g(x)))
+ +Table 6. Feedback and optimization components + +
SymbolTypeDefinition & Description
R(St, at, St+1)FunctionReward function, outputs reward value rt
I(·)FunctionIndicator function (returns 1 if condition holds, else 0)
∇θJ(θ)OperatorPolicy gradient for optimizing policy parameters Θ
γScalarDiscount factor for cumulative reward calculation
+ +Table 7. Submodule-specific symbols + +
SymbolTypeDefinition & Description
ΨFunctionReasoning function, generates intermediate reasoning results
ΓFunctionDecision function, produces final outputs (e.g., answers)
ψ(·)FunctionBranch selector for reflective reasoning path selection
φ(·)FunctionConfidence mapping function (evaluations to scalar confidence)
τScalarDecision threshold for triggering specific operations (e.g., verification/termination)
\ No newline at end of file diff --git a/data/2025/2504_15xxx/2504.15909/images/026818ed8e9d2019b420881aa9487005b319ec64912505ff456df2d7da6e3b61.jpg b/data/2025/2504_15xxx/2504.15909/images/026818ed8e9d2019b420881aa9487005b319ec64912505ff456df2d7da6e3b61.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1360161ebd3e6293a505e79e49d597692e4822c4 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/026818ed8e9d2019b420881aa9487005b319ec64912505ff456df2d7da6e3b61.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7accdd8d48ec929f4c65cf6a1d1932c4702e953f6a66bdd7b9d0d8eceedd60e4 +size 3089 diff --git a/data/2025/2504_15xxx/2504.15909/images/044160498b6a6dec3d4b731753eb83312cd53fabf064ad53ce6793173d12947b.jpg b/data/2025/2504_15xxx/2504.15909/images/044160498b6a6dec3d4b731753eb83312cd53fabf064ad53ce6793173d12947b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..49fc02af2bb4cacf3d979127ead66595a8000204 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/044160498b6a6dec3d4b731753eb83312cd53fabf064ad53ce6793173d12947b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2482dfa2951681206ff2d53c6f249215309a4847e3b910f39da906a33f46612 +size 118819 diff --git a/data/2025/2504_15xxx/2504.15909/images/09571464f1bd88bbbe376dc373e70dd2c58a83763d1dbb94f0dcc2d042a01304.jpg b/data/2025/2504_15xxx/2504.15909/images/09571464f1bd88bbbe376dc373e70dd2c58a83763d1dbb94f0dcc2d042a01304.jpg new file mode 100644 index 0000000000000000000000000000000000000000..71f69d063ea9570084c8a7b2b3dde1f387b04766 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/09571464f1bd88bbbe376dc373e70dd2c58a83763d1dbb94f0dcc2d042a01304.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:892e1a3b8a4d3c43863bc52421ac3cbd4059537b14fb9a80bce88f0b4b699ddb +size 1588 diff --git 
a/data/2025/2504_15xxx/2504.15909/images/0bb1f01e1049a5a3fe1d835a65ca2f133ec47d989cefa8141411e914b10cf409.jpg b/data/2025/2504_15xxx/2504.15909/images/0bb1f01e1049a5a3fe1d835a65ca2f133ec47d989cefa8141411e914b10cf409.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d3b70bf0ead93d6d5581aa0120dfca45ef554878 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/0bb1f01e1049a5a3fe1d835a65ca2f133ec47d989cefa8141411e914b10cf409.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d58eeb4af8433f4ed3cb7b0a5e000536277d5f6d445f3caa5940cfa01f3afbf4 +size 5669 diff --git a/data/2025/2504_15xxx/2504.15909/images/11eeb8f217c0603397468d6f575850696bfced99e1ffb539266f4c9bacb0de5f.jpg b/data/2025/2504_15xxx/2504.15909/images/11eeb8f217c0603397468d6f575850696bfced99e1ffb539266f4c9bacb0de5f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..01f512d0582c37ab076a4930eaab67ae0da9bd44 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/11eeb8f217c0603397468d6f575850696bfced99e1ffb539266f4c9bacb0de5f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9c056b4e5064e9ab660935f3bfd08b801e9b502dd62667f98826be9d7550af4 +size 3103 diff --git a/data/2025/2504_15xxx/2504.15909/images/134e9289b64c48e173041598397d78a2ffff93d776484a12645311f697b63f1f.jpg b/data/2025/2504_15xxx/2504.15909/images/134e9289b64c48e173041598397d78a2ffff93d776484a12645311f697b63f1f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..68be30e21348b053982f1c6b3e9c42b911ee385c --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/134e9289b64c48e173041598397d78a2ffff93d776484a12645311f697b63f1f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:686bea26d50477ac792f35ab93cdf6960f92de8aab96d5ac24f2bb5acd356425 +size 1382 diff --git a/data/2025/2504_15xxx/2504.15909/images/1d848e6509a2fa6c7cfdf8b5bb7b9054f778a70c266ccc426354134081ddf86d.jpg 
b/data/2025/2504_15xxx/2504.15909/images/1d848e6509a2fa6c7cfdf8b5bb7b9054f778a70c266ccc426354134081ddf86d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9c7b48dbc4c0c78e948081c57ac3407597117a19 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/1d848e6509a2fa6c7cfdf8b5bb7b9054f778a70c266ccc426354134081ddf86d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:317b5ebc466e029a870a000d2000882be1740b2c753e05c73e47e7060ffe8e9a +size 45875 diff --git a/data/2025/2504_15xxx/2504.15909/images/1e880ccde31a88477a8599518908e126cc979da6226fba098d13340d6687a5c6.jpg b/data/2025/2504_15xxx/2504.15909/images/1e880ccde31a88477a8599518908e126cc979da6226fba098d13340d6687a5c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c2920a80d4426afd92ca33eb55f17d1f8bb51e4d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/1e880ccde31a88477a8599518908e126cc979da6226fba098d13340d6687a5c6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0c8013139d4495b3b93899cf55b3f22d06137ecd5a41e1e4605a265dfa7f03f +size 51948 diff --git a/data/2025/2504_15xxx/2504.15909/images/260a7df8affafd639348b6224f0dabe59b9ebb2d9c5673bcb9348e540aa9a196.jpg b/data/2025/2504_15xxx/2504.15909/images/260a7df8affafd639348b6224f0dabe59b9ebb2d9c5673bcb9348e540aa9a196.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d749206e4bcb24d13ef78f5ed9909d5d75412a7 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/260a7df8affafd639348b6224f0dabe59b9ebb2d9c5673bcb9348e540aa9a196.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f57af7bfb8d893cddcfd400c7cab88e7a1f157612ac1612731cf4cf125b36705 +size 3705 diff --git a/data/2025/2504_15xxx/2504.15909/images/3b93db755ec01b5c451d164e3cc6cc14ab28488e52aa26dae925d402e660ea21.jpg b/data/2025/2504_15xxx/2504.15909/images/3b93db755ec01b5c451d164e3cc6cc14ab28488e52aa26dae925d402e660ea21.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..4c5aeddec48ee7f6d9d57ac17f4eba0294378964 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/3b93db755ec01b5c451d164e3cc6cc14ab28488e52aa26dae925d402e660ea21.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:586b3ee080111f053ab910c060cc6748bea76178815ade32daed31dbf86222ff +size 30202 diff --git a/data/2025/2504_15xxx/2504.15909/images/3ea9125fe5a9cbbb68a717cc756f0dcb1188de6dd79cef3d1bd466d0563c9e11.jpg b/data/2025/2504_15xxx/2504.15909/images/3ea9125fe5a9cbbb68a717cc756f0dcb1188de6dd79cef3d1bd466d0563c9e11.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b84fde8dbd725be03c8c19482e2d7d7d3cd4b9ee --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/3ea9125fe5a9cbbb68a717cc756f0dcb1188de6dd79cef3d1bd466d0563c9e11.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5af91c7537826ff6a63450334c95302ef23074281e6f5fe23cbb9e9e4d1406e8 +size 5721 diff --git a/data/2025/2504_15xxx/2504.15909/images/41e010600a7e8b306e4b7d692efd51a598046a1382a35dd7bb01fd4aa49ff2f2.jpg b/data/2025/2504_15xxx/2504.15909/images/41e010600a7e8b306e4b7d692efd51a598046a1382a35dd7bb01fd4aa49ff2f2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b3590db8cd3ac92a7d639be23d286eb3831689b4 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/41e010600a7e8b306e4b7d692efd51a598046a1382a35dd7bb01fd4aa49ff2f2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d7b47f883b26ccd84d53a39e6387e461beea7e95ebb23164fbe8d510c5ba69b +size 169131 diff --git a/data/2025/2504_15xxx/2504.15909/images/43b9c3e6021b5e7e0d22dfa723c29a2ecc7d344929cf0de432c2f1383653b5ca.jpg b/data/2025/2504_15xxx/2504.15909/images/43b9c3e6021b5e7e0d22dfa723c29a2ecc7d344929cf0de432c2f1383653b5ca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..049d9357eade4a869f40df8e241b6acd52c1b805 --- /dev/null +++ 
b/data/2025/2504_15xxx/2504.15909/images/43b9c3e6021b5e7e0d22dfa723c29a2ecc7d344929cf0de432c2f1383653b5ca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81be85a41664cd8de42d5ddb2dc71b5ca3a22c1eeb6d0f2e14a466b1f5c4ed87 +size 142929 diff --git a/data/2025/2504_15xxx/2504.15909/images/47ae8d3debcbe5491ba6ec372772b4cd0ce868b70cfe808a11723bf34a8fc6af.jpg b/data/2025/2504_15xxx/2504.15909/images/47ae8d3debcbe5491ba6ec372772b4cd0ce868b70cfe808a11723bf34a8fc6af.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3926c6972edaaff154cdbf4d81d435f5f722101b --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/47ae8d3debcbe5491ba6ec372772b4cd0ce868b70cfe808a11723bf34a8fc6af.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8bfad888e1e5f4a16e7800aa67ed01d8691884642c8c4f58c97d3240c86cb7e2 +size 338420 diff --git a/data/2025/2504_15xxx/2504.15909/images/4d1bcbeaa0e278e95266fc849a9e6ebfbf2b494143d1d43c264d056f0d8e6ecf.jpg b/data/2025/2504_15xxx/2504.15909/images/4d1bcbeaa0e278e95266fc849a9e6ebfbf2b494143d1d43c264d056f0d8e6ecf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2514ecef653ca5dd8d946e4ce4923981a531093d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/4d1bcbeaa0e278e95266fc849a9e6ebfbf2b494143d1d43c264d056f0d8e6ecf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:541857f7c8b92b2aed295040d99cc6ae6c852c14a24693588decda92637952c1 +size 48268 diff --git a/data/2025/2504_15xxx/2504.15909/images/50f75c55705e9ac547c3495a6dc4aa6204a63d2547e17f7bca84b3e970ddf703.jpg b/data/2025/2504_15xxx/2504.15909/images/50f75c55705e9ac547c3495a6dc4aa6204a63d2547e17f7bca84b3e970ddf703.jpg new file mode 100644 index 0000000000000000000000000000000000000000..989205a8d2e9e5f6f9cb035b414cc91a61db7f71 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/50f75c55705e9ac547c3495a6dc4aa6204a63d2547e17f7bca84b3e970ddf703.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:6500211d11a5731280109de0026f926c5f79ac3a2b1ab651938ec322136e5468 +size 5134 diff --git a/data/2025/2504_15xxx/2504.15909/images/5456685368ffe44fb4c5b81029bd5ef81d13e2a9f1ac24b37bf34bf87ce8844d.jpg b/data/2025/2504_15xxx/2504.15909/images/5456685368ffe44fb4c5b81029bd5ef81d13e2a9f1ac24b37bf34bf87ce8844d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4f8c55aa59d4c3bc028fa1d1363c5d66383d5a40 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/5456685368ffe44fb4c5b81029bd5ef81d13e2a9f1ac24b37bf34bf87ce8844d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:032a449f3f3f161b852b0a4eb84baf0ed198b7b441da2c8e288a04c3b8f8d072 +size 125086 diff --git a/data/2025/2504_15xxx/2504.15909/images/6040981e5ae6dc2acb3aecd45dada104998a9d10d8cf8028b4baf25c8047693b.jpg b/data/2025/2504_15xxx/2504.15909/images/6040981e5ae6dc2acb3aecd45dada104998a9d10d8cf8028b4baf25c8047693b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..58ec7adbb1ba57cddb269acbc6bc5a8dd1c12e7c --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/6040981e5ae6dc2acb3aecd45dada104998a9d10d8cf8028b4baf25c8047693b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09fbd3801e11b706f2e70829304bf5469b9e80d32ddac93628bf805cea5a7b29 +size 6735 diff --git a/data/2025/2504_15xxx/2504.15909/images/7887c1ca647a9f89e6f4474b770fc82df542d6a89ee571cf004fffbd171081d9.jpg b/data/2025/2504_15xxx/2504.15909/images/7887c1ca647a9f89e6f4474b770fc82df542d6a89ee571cf004fffbd171081d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f1212efa44a29e9ce76f241d5aa1fbb49f2040f --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/7887c1ca647a9f89e6f4474b770fc82df542d6a89ee571cf004fffbd171081d9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:293c318fb4b39319a6c3a557b3ae52ad4a0360baded7d30dc3494eb5445a77e0 +size 266855 diff --git 
a/data/2025/2504_15xxx/2504.15909/images/7ac1eba9b0f22ec452c189570cac365dfa05f5cba364f27f64c05775b1c82bff.jpg b/data/2025/2504_15xxx/2504.15909/images/7ac1eba9b0f22ec452c189570cac365dfa05f5cba364f27f64c05775b1c82bff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a13c1372b033b13366108d7373426db15308b58a --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/7ac1eba9b0f22ec452c189570cac365dfa05f5cba364f27f64c05775b1c82bff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e3519629cba43ab6aefd22a06172bf9b85655fc1398822d2266ec94a7745638 +size 174576 diff --git a/data/2025/2504_15xxx/2504.15909/images/8b88426292f7db739f94523b48b34208c47731f678bdd165bde3f6c09889658d.jpg b/data/2025/2504_15xxx/2504.15909/images/8b88426292f7db739f94523b48b34208c47731f678bdd165bde3f6c09889658d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0b4e648a60681bf015276965041c85c818e562e3 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/8b88426292f7db739f94523b48b34208c47731f678bdd165bde3f6c09889658d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed76cd76e1d6a7b39ca62d5a664890557957c424a3be6bccb773638f60f5bc78 +size 4981 diff --git a/data/2025/2504_15xxx/2504.15909/images/9e84b6f53ff577c819991b081153340f022d99ed4e4207a9a3ce616a9fa9e815.jpg b/data/2025/2504_15xxx/2504.15909/images/9e84b6f53ff577c819991b081153340f022d99ed4e4207a9a3ce616a9fa9e815.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8049438f1c6d01c976cc05d30f20ffa13886cadf --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/9e84b6f53ff577c819991b081153340f022d99ed4e4207a9a3ce616a9fa9e815.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fa247cc07d69bc7ee9db3f58364de17e7451adbea8465e84d9221e834dd7ad8 +size 114395 diff --git a/data/2025/2504_15xxx/2504.15909/images/a797186982b7420dcac71a470f6aca1de11923d2ffcfd02c0fb32375430a9b11.jpg 
b/data/2025/2504_15xxx/2504.15909/images/a797186982b7420dcac71a470f6aca1de11923d2ffcfd02c0fb32375430a9b11.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4515718d2f423ad8f2e2e2013f0f5dc6260979af --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/a797186982b7420dcac71a470f6aca1de11923d2ffcfd02c0fb32375430a9b11.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:328bd696e8fcef9efb044c23a1ce0e3fe3bb34de2b9f1a76f5fec036a144d5c4 +size 65930 diff --git a/data/2025/2504_15xxx/2504.15909/images/a92e1ff229df312dcac56e5cfb3e551bc2263f588f8e2558c8d7b588227aff0d.jpg b/data/2025/2504_15xxx/2504.15909/images/a92e1ff229df312dcac56e5cfb3e551bc2263f588f8e2558c8d7b588227aff0d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bff8ee28e2e47d3c8f04d26ed0215a649382d3f5 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/a92e1ff229df312dcac56e5cfb3e551bc2263f588f8e2558c8d7b588227aff0d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6c792a507abd32166fbba3f93a9e6e85f16f7e78d2e60b9e99774d16930cb45 +size 354967 diff --git a/data/2025/2504_15xxx/2504.15909/images/b716652fd0755f035de4e2f35034eceb961ed32d5c7dce43c618b2799251bf91.jpg b/data/2025/2504_15xxx/2504.15909/images/b716652fd0755f035de4e2f35034eceb961ed32d5c7dce43c618b2799251bf91.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fead71ff65f59f4965311643d6a29481fa6e590e --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/b716652fd0755f035de4e2f35034eceb961ed32d5c7dce43c618b2799251bf91.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f83ee93db4dd5b723ae3250b4ec5068ed59360190ac272f96bf2b9f89371b467 +size 6769 diff --git a/data/2025/2504_15xxx/2504.15909/images/cc011ed02bfe16c008fa59b27f259cd658bc4f9700c3e26493d504dd622d891f.jpg b/data/2025/2504_15xxx/2504.15909/images/cc011ed02bfe16c008fa59b27f259cd658bc4f9700c3e26493d504dd622d891f.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..b3f0e51774c27e34921ba478961f6fb8723a45ab --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/cc011ed02bfe16c008fa59b27f259cd658bc4f9700c3e26493d504dd622d891f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:112bcc6223953345a8d860639a37c60a1e222ceef97b16c9ba816ce2c7d8bfc6 +size 75988 diff --git a/data/2025/2504_15xxx/2504.15909/images/d1f3e9f6c15da1afab15a3d990fdc1295647b11a1816d189b65c2a07934958e2.jpg b/data/2025/2504_15xxx/2504.15909/images/d1f3e9f6c15da1afab15a3d990fdc1295647b11a1816d189b65c2a07934958e2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..56350ef1d02c0b50a71d2d66a0efc1e59072378a --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/d1f3e9f6c15da1afab15a3d990fdc1295647b11a1816d189b65c2a07934958e2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2dea841745cd06b2b7ce9eb0525b255b4365802474528b5dc0770c1a0334203c +size 54267 diff --git a/data/2025/2504_15xxx/2504.15909/images/d542cd58bb3396b896457f99ce48009a442553bf91f1c366bd9dea3388cd6f2c.jpg b/data/2025/2504_15xxx/2504.15909/images/d542cd58bb3396b896457f99ce48009a442553bf91f1c366bd9dea3388cd6f2c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..81cbc2a5938b7c9ce1a900147fe15ce07a0e67ab --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/d542cd58bb3396b896457f99ce48009a442553bf91f1c366bd9dea3388cd6f2c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de600df3d56b63f67667080b8326b1dd78080ed26fec85ecd2bc5115b1a4fef7 +size 3657 diff --git a/data/2025/2504_15xxx/2504.15909/images/e3f98099e87667aff955149c5c01090756f51e471b3f9c11051c7cdc075eabb0.jpg b/data/2025/2504_15xxx/2504.15909/images/e3f98099e87667aff955149c5c01090756f51e471b3f9c11051c7cdc075eabb0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..951cc1288a2adee8ea99e7f27aedf1b5f5f7fba8 --- /dev/null +++ 
b/data/2025/2504_15xxx/2504.15909/images/e3f98099e87667aff955149c5c01090756f51e471b3f9c11051c7cdc075eabb0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8794c02c65039e565270f230fb1860e2ee4792e0d91c26858280be4c4c0e43e +size 5443 diff --git a/data/2025/2504_15xxx/2504.15909/images/ea63eb4b161da6fe9a953b4d2d131a0946aee8e4aa2d5f88da7c9c7c4c90820f.jpg b/data/2025/2504_15xxx/2504.15909/images/ea63eb4b161da6fe9a953b4d2d131a0946aee8e4aa2d5f88da7c9c7c4c90820f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..42425e01fc6cc3f8314099c7c0ee8f545e55ce51 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/ea63eb4b161da6fe9a953b4d2d131a0946aee8e4aa2d5f88da7c9c7c4c90820f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef95b9359ab7548bfea6e5a6368fef1c0fcc9b005b7f03f3212cb1bb2deb72aa +size 42807 diff --git a/data/2025/2504_15xxx/2504.15909/images/f1d1f0959136d71cea86d5be0b8740d2d2d379b3fc2093488ed9cf7e2243fbb0.jpg b/data/2025/2504_15xxx/2504.15909/images/f1d1f0959136d71cea86d5be0b8740d2d2d379b3fc2093488ed9cf7e2243fbb0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..39abcf473017e6fcf0f05a5a29bd46b54ddeb128 --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/images/f1d1f0959136d71cea86d5be0b8740d2d2d379b3fc2093488ed9cf7e2243fbb0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c22085b373e85183022f06f8dc227d7bc4c50fc9356c94d537217d5aa06c496a +size 2795 diff --git a/data/2025/2504_15xxx/2504.15909/layout.json b/data/2025/2504_15xxx/2504.15909/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..ba9d6b7e9d9485e33d617feea0c307d814abea3d --- /dev/null +++ b/data/2025/2504_15xxx/2504.15909/layout.json @@ -0,0 +1,24588 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 51, + 69, + 559, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 69, + 559, + 94 + ], + "spans": [ + { + "bbox": [ + 
51, + 69, + 559, + 94 + ], + "type": "text", + "content": "Synergizing RAG and Reasoning: A Systematic Review" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 102, + 171, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 102, + 171, + 114 + ], + "spans": [ + { + "bbox": [ + 111, + 102, + 171, + 114 + ], + "type": "text", + "content": "Yunfan Gao" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 75, + 115, + 208, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 115, + 208, + 126 + ], + "spans": [ + { + "bbox": [ + 75, + 115, + 208, + 126 + ], + "type": "text", + "content": "Shanghai Research Institute for" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 73, + 128, + 211, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 128, + 211, + 139 + ], + "spans": [ + { + "bbox": [ + 73, + 128, + 211, + 139 + ], + "type": "text", + "content": "Intelligent Autonomous Systems," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 140, + 178, + 151 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 140, + 178, + 151 + ], + "spans": [ + { + "bbox": [ + 105, + 140, + 178, + 151 + ], + "type": "text", + "content": "Tongji University" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 128, + 152, + 154, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 152, + 154, + 161 + ], + "spans": [ + { + "bbox": [ + 128, + 152, + 154, + 161 + ], + "type": "text", + "content": "China" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 83, + 163, + 198, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 163, + 198, + 175 + ], + "spans": [ + { + "bbox": [ + 83, + 163, + 198, + 175 + ], + "type": "text", + "content": "gaoyunfan1602@gmail.com" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 122, + 184, + 160, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 
184, + 160, + 196 + ], + "spans": [ + { + "bbox": [ + 122, + 184, + 160, + 196 + ], + "type": "text", + "content": "Yuxi Bi" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 71, + 198, + 212, + 209 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 198, + 212, + 209 + ], + "spans": [ + { + "bbox": [ + 71, + 198, + 212, + 209 + ], + "type": "text", + "content": "College of Design and Innovation," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 209, + 177, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 209, + 177, + 221 + ], + "spans": [ + { + "bbox": [ + 105, + 209, + 177, + 221 + ], + "type": "text", + "content": "Tongji University" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 128, + 222, + 154, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 222, + 154, + 232 + ], + "spans": [ + { + "bbox": [ + 128, + 222, + 154, + 232 + ], + "type": "text", + "content": "China" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 102, + 234, + 180, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 102, + 234, + 180, + 245 + ], + "spans": [ + { + "bbox": [ + 102, + 234, + 180, + 245 + ], + "type": "text", + "content": "yuxibi@gmail.com" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 278, + 102, + 332, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 278, + 102, + 332, + 115 + ], + "spans": [ + { + "bbox": [ + 278, + 102, + 332, + 115 + ], + "type": "text", + "content": "Yun Xiong" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 237, + 116, + 374, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 116, + 374, + 127 + ], + "spans": [ + { + "bbox": [ + 237, + 116, + 374, + 127 + ], + "type": "text", + "content": "Shanghai Key Laboratory of Data" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 229, + 128, + 382, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 229, + 128, + 382, + 139 + ], + "spans": [ + { + "bbox": [ + 229, + 128, + 382, + 139 + ], + "type": "text", + "content": "Science, School of Computer Science," + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 269, + 140, + 341, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 269, + 140, + 341, + 150 + ], + "spans": [ + { + "bbox": [ + 269, + 140, + 341, + 150 + ], + "type": "text", + "content": "Fudan University" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 291, + 152, + 318, + 161 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 291, + 152, + 318, + 161 + ], + "spans": [ + { + "bbox": [ + 291, + 152, + 318, + 161 + ], + "type": "text", + "content": "China" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 263, + 163, + 347, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 163, + 347, + 175 + ], + "spans": [ + { + "bbox": [ + 263, + 163, + 347, + 175 + ], + "type": "text", + "content": "yunx@fudan.edu.cn" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 280, + 184, + 330, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 184, + 330, + 197 + ], + "spans": [ + { + "bbox": [ + 280, + 184, + 330, + 197 + ], + "type": "text", + "content": "Ming Xue" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 282, + 198, + 328, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 282, + 198, + 328, + 208 + ], + "spans": [ + { + "bbox": [ + 282, + 198, + 328, + 208 + ], + "type": "text", + "content": "Percena AI" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 292, + 210, + 318, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 292, + 210, + 318, + 220 + ], + "spans": [ + { + "bbox": [ + 292, + 210, + 318, + 220 + ], + "type": "text", + "content": "China" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 267, + 222, + 343, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { 
+ "bbox": [ + 267, + 222, + 343, + 232 + ], + "spans": [ + { + "bbox": [ + 267, + 222, + 343, + 232 + ], + "type": "text", + "content": "mxue@percena.co" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 440, + 102, + 498, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 440, + 102, + 498, + 115 + ], + "spans": [ + { + "bbox": [ + 440, + 102, + 498, + 115 + ], + "type": "text", + "content": "Yijie Zhong" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 400, + 116, + 540, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 400, + 116, + 540, + 127 + ], + "spans": [ + { + "bbox": [ + 400, + 116, + 540, + 127 + ], + "type": "text", + "content": "College of Design and Innovation," + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 433, + 128, + 506, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 433, + 128, + 506, + 139 + ], + "spans": [ + { + "bbox": [ + 433, + 128, + 506, + 139 + ], + "type": "text", + "content": "Tongji University" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 456, + 140, + 482, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 456, + 140, + 482, + 148 + ], + "spans": [ + { + "bbox": [ + 456, + 140, + 482, + 148 + ], + "type": "text", + "content": "China" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 423, + 151, + 515, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 423, + 151, + 515, + 163 + ], + "spans": [ + { + "bbox": [ + 423, + 151, + 515, + 163 + ], + "type": "text", + "content": "dun.haski@gmail.com" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 433, + 184, + 505, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 433, + 184, + 505, + 198 + ], + "spans": [ + { + "bbox": [ + 433, + 184, + 505, + 198 + ], + "type": "text", + "content": "Haofen Wang*" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 400, + 198, + 539, + 209 + ], + "type": "text", + "angle": 0, 
+ "lines": [ + { + "bbox": [ + 400, + 198, + 539, + 209 + ], + "spans": [ + { + "bbox": [ + 400, + 198, + 539, + 209 + ], + "type": "text", + "content": "College of Design and Innovation," + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 433, + 209, + 505, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 433, + 209, + 505, + 221 + ], + "spans": [ + { + "bbox": [ + 433, + 209, + 505, + 221 + ], + "type": "text", + "content": "Tongji University" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 456, + 222, + 482, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 456, + 222, + 482, + 232 + ], + "spans": [ + { + "bbox": [ + 456, + 222, + 482, + 232 + ], + "type": "text", + "content": "China" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 410, + 234, + 528, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 410, + 234, + 528, + 245 + ], + "spans": [ + { + "bbox": [ + 410, + 234, + 528, + 245 + ], + "type": "text", + "content": "carter.whfcarter@gmail.com" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 51, + 253, + 100, + 264 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 253, + 100, + 264 + ], + "spans": [ + { + "bbox": [ + 51, + 253, + 100, + 264 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 50, + 268, + 296, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 268, + 296, + 533 + ], + "spans": [ + { + "bbox": [ + 50, + 268, + 296, + 533 + ], + "type": "text", + "content": "Recent breakthroughs in large language models (LLMs), particularly in reasoning capabilities, have propelled Retrieval-Augmented Generation (RAG) to unprecedented levels. By synergizing retrieval mechanisms with advanced reasoning, LLMs can now tackle increasingly complex problems. 
This paper presents a systematic review of the collaborative interplay between RAG and reasoning, clearly defining \"reasoning\" within the RAG context. It construct a comprehensive taxonomy encompassing multi-dimensional collaborative objectives, representative paradigms, and technical implementations, and analyze the bidirectional synergy methods. Additionally, we critically evaluate current limitations in RAG assessment, including the absence of intermediate supervision for multi-step reasoning and practical challenges related to cost-risk trade-offs. To bridge theory and practice, we provide practical guidelines tailored to diverse real-world applications. Finally, we identify promising research directions, such as graph-based knowledge integration, hybrid model collaboration, and RL-driven optimization. Overall, this work presents a theoretical framework and practical foundation to advance RAG systems in academia and industry, fostering the next generation of RAG solutions." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 52, + 552, + 141, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 552, + 141, + 563 + ], + "spans": [ + { + "bbox": [ + 52, + 552, + 141, + 563 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 50, + 567, + 295, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 567, + 295, + 688 + ], + "spans": [ + { + "bbox": [ + 50, + 567, + 295, + 688 + ], + "type": "text", + "content": "Recent breakthroughs in Large Language Models (LLMs) like OpenAI O1 [39] and DeepSeek-R1 [25] have shifted the paradigm from \"pre-training scaling\" to \"test-time scaling\" [63]. 
Unlike traditional language models that improve via corpus accumulation during pre-training, these models enhance performance in complex tasks—such as mathematical derivation and code generation [29]—through post-training innovations during the inference phase (e.g., Long-CoT thinking [8]). This shift has led to the emergence of \"Large Reasoning Models\" (LRMs) [99] with advanced internal reasoning abilities." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 313, + 253, + 561, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 253, + 561, + 372 + ], + "spans": [ + { + "bbox": [ + 313, + 253, + 561, + 372 + ], + "type": "text", + "content": "These advancements have not only boosted basic model capabilities but also opened new avenues for application technologies like Retrieval-Augmented Generation (RAG) [21]. Serving as a key link between language models and external knowledge, RAG overcomes traditional LLMs' limits in knowledge freshness, domain specificity, and factual accuracy by retrieving real-time non-parametric information and integrating it into the context. This enhances information processing and reduces hallucination risks in knowledge-intensive tasks." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 313, + 373, + 560, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 373, + 560, + 457 + ], + "spans": [ + { + "bbox": [ + 313, + 373, + 560, + 457 + ], + "type": "text", + "content": "Technological evolution is advancing RAG architectures through innovations like query rewriting [61], re-ranking [1], and hybrid retrieval [88], creating an Advanced RAG paradigm focused on pre-retrieval optimization and post-retrieval refinement. Modular RAG [22] further breaks down these systems into component-based, service-oriented architectures, using orchestration to tackle practical challenges." 
+ } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 313, + 457, + 559, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 457, + 559, + 540 + ], + "spans": [ + { + "bbox": [ + 313, + 457, + 559, + 540 + ], + "type": "text", + "content": "Despite improvements in query intent recognition and knowledge use, challenges of RAG remain in demanding tasks like deep research and complex decision-making. Key issues include: 1) difficulty capturing intent from ambiguous queries; 2) poor logical coherence in multi-hop reasoning; 3) efficiency limits of traditional retrieval in open domains; and 4) degraded generation quality from noisy retrieved data." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 313, + 540, + 560, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 540, + 560, + 708 + ], + "spans": [ + { + "bbox": [ + 313, + 540, + 560, + 708 + ], + "type": "text", + "content": "Models like DeepSeek-R1, with strong reasoning capabilities, inspire new directions for RAG systems. As shown in Figure 1, recent research explores integrating formal reasoning frameworks with knowledge retrieval. This approach optimizes retrieval through logic-driven query reformulation and uses reasoning to analyze and validate retrieved knowledge, creating cognitive synergy between retrieval and generation. This paradigm aims to overcome conventional limitations, enabling intelligent systems with rigorous logic and reliable knowledge use. From a trend perspective, an increasing number of methods combine reasoning and retrieval abilities through reinforcement learning (RL), marking a new direction in the LRM era. 
Meanwhile, prompt-based approaches continue to rapidly evolve, with researchers aiming" + } + ] + } + ], + "index": 40 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 216, + 37, + 560 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 216, + 37, + 560 + ], + "spans": [ + { + "bbox": [ + 14, + 216, + 37, + 560 + ], + "type": "text", + "content": "arXiv:2504.15909v2 [cs.IR] 24 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 51, + 708, + 130, + 718 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 708, + 130, + 718 + ], + "spans": [ + { + "bbox": [ + 51, + 708, + 130, + 718 + ], + "type": "text", + "content": "*Corresponding Author" + } + ] + } + ], + "index": 41 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 70, + 558, + 335 + ], + "blocks": [ + { + "bbox": [ + 53, + 70, + 558, + 335 + ], + "lines": [ + { + "bbox": [ + 53, + 70, + 558, + 335 + ], + "spans": [ + { + "bbox": [ + 53, + 70, + 558, + 335 + ], + "type": "image", + "image_path": "9e84b6f53ff577c819991b081153340f022d99ed4e4207a9a3ce616a9fa9e815.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 344, + 559, + 404 + ], + "lines": [ + { + "bbox": [ + 50, + 344, + 559, + 404 + ], + "spans": [ + { + "bbox": [ + 50, + 344, + 559, + 404 + ], + "type": "text", + "content": "Figure 1. Timeline of studies on RAG-reasoning synergy. From a technical perspective, the approaches can be categorized into Prompt-Based, Tuning-Based, and RL-Based methods. A notable trend is the increasing use of Reinforcement Learning to enhance RAG systems, particularly following the prosperity of test-time scaling. Meanwhile, Prompt-Based and Tuning-Based methods continue to evolve in parallel, demonstrating that there are multiple pathways to integrating reasoning capabilities into RAG systems." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 420, + 295, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 420, + 295, + 468 + ], + "spans": [ + { + "bbox": [ + 50, + 420, + 295, + 468 + ], + "type": "text", + "content": "to achieve results through workflow design while keeping model parameters frozen. Notably, sole reliance on tuning methods is steadily decreasing, suggesting limited improvements from additional fine-tuning at this stage." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 468, + 295, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 468, + 295, + 538 + ], + "spans": [ + { + "bbox": [ + 50, + 468, + 295, + 538 + ], + "type": "text", + "content": "Traditional RAG is limited by its unidirectional flow (retrieval " + }, + { + "bbox": [ + 50, + 468, + 295, + 538 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 50, + 468, + 295, + 538 + ], + "type": "text", + "content": " generation). Integrating reasoning capabilities grants the system greater autonomy, unlocking new possibilities. As shown in Figure 2, this integration is poised to drive major breakthroughs, enabling practical use in complex real-world scenarios." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 540, + 295, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 540, + 295, + 696 + ], + "spans": [ + { + "bbox": [ + 50, + 540, + 295, + 696 + ], + "type": "text", + "content": "1) From Ambiguous Semantic Matching to Logic-Driven Targeted Retrieval. Traditional RAG relies on semantic similarity for retrieval; however, it is sensitive to phrasing variations. Advanced reasoning allows deep logical analysis of queries (e.g., causal links, conditional constraints) to dynamically refine retrieval strategies [24]. 
For example, to answer \"How to reduce postoperative infection risks in diabetes patients?\", the system prioritizes retrieving \"blood glucose control thresholds\" and \"antibiotic usage guidelines\" over simply matching \"diabetes postoperative care\". This approach supports multi-hop retrieval by breaking down complex queries into sequential sub-queries while preserving cross-document coherence through reasoning chains." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 420, + 559, + 708 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 313, + 420, + 559, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 420, + 559, + 540 + ], + "spans": [ + { + "bbox": [ + 313, + 420, + 559, + 540 + ], + "type": "text", + "content": "2) From Simple Information Aggregation to Logically Coherent Context Construction. Current RAG systems input all retrieved document chunks into context directly, often causing fragmented or contradictory information that confuses LLMs. Reasoning-enhanced systems integrate evidence chains by logically verifying and inferring causality in retrieved content, filtering conflicts and forming coherent explanations [100]. They also use dynamic knowledge completion to detect missing logical links, prompting iterative retrieval or inference to fill gaps [51]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 540, + 559, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 540, + 559, + 659 + ], + "spans": [ + { + "bbox": [ + 313, + 540, + 559, + 659 + ], + "type": "text", + "content": "3) From Simple and Single-Turn QA to Systemic Decision Support. Traditional RAG performs well in factual QA [65] but struggles with multi-step and complex decision-making. Reasoning-integrated systems produce structured reasoning output, enhancing multi-objective optimization to balance retrieval breadth and solution feasibility under various constraints. 
For example, multiple constraints under different conditions in engineering construction plans [54], and the formulation of diagnosis and treatment plans for various diseases in the medical field [105]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 659, + 559, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 659, + 559, + 708 + ], + "spans": [ + { + "bbox": [ + 313, + 659, + 559, + 708 + ], + "type": "text", + "content": "4) From Indiscriminate Retrieval to Intelligent Resource Allocation. Traditional RAG retrieves documents for all queries, regardless of complexity. Reasoning-enhanced systems use on-demand retrieval, handling simple queries" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "spans": [ + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "type": "text", + "content": "Gao et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 70, + 560, + 289 + ], + "blocks": [ + { + "bbox": [ + 52, + 70, + 560, + 289 + ], + "lines": [ + { + "bbox": [ + 52, + 70, + 560, + 289 + ], + "spans": [ + { + "bbox": [ + 52, + 70, + 560, + 289 + ], + "type": "image", + "image_path": "41e010600a7e8b306e4b7d692efd51a598046a1382a35dd7bb01fd4aa49ff2f2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 185, + 299, + 425, + 312 + ], + "lines": [ + { + "bbox": [ + 185, + 299, + 425, + 312 + ], + "spans": [ + { + "bbox": [ + 185, + 299, + 425, + 312 + ], + "type": "text", + "content": "Figure 2. Advantages of Combining RAG with Reasoning" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 327, + 295, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 327, + 295, + 375 + ], + "spans": [ + { + "bbox": [ + 50, + 327, + 295, + 375 + ], + "type": "text", + "content": "with direct generation and complex ones with multi-round retrieval to reduce latency [20]. Dynamic retrieval pruning uses pre-reasoning predictions to target key information, minimizing unnecessary document and graph traversal [41]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 375, + 295, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 375, + 295, + 483 + ], + "spans": [ + { + "bbox": [ + 50, + 375, + 295, + 483 + ], + "type": "text", + "content": "5) From Passive Knowledge Tool to Proactive Cognitive Assistant. Advancing beyond reactive knowledge retrieval, reasoning-enhanced systems can proactively serve users by asking clarifying questions and anticipating implicit needs. 
This shift enables human-like assistants that integrate memory, reasoning, and decision-making, proving especially valuable for complex tasks such as deep research [43], business analytics [50], personal assistant [107] and urban planning [85]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 483, + 295, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 483, + 295, + 577 + ], + "spans": [ + { + "bbox": [ + 50, + 483, + 295, + 577 + ], + "type": "text", + "content": "However, the synergistic pathway between RAG and reasoning requires more than simply replacing conventional generative LLMs with LRM modules. It necessitates deep integration of technological evolution insights from LRM - achieved through reconstructing knowledge retrieval mechanisms and strengthening reasoning-generation collaborative linkages - to enable system-level enhancement of cognitive capabilities within the RAG architecture." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 578, + 295, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 578, + 295, + 685 + ], + "spans": [ + { + "bbox": [ + 50, + 578, + 295, + 685 + ], + "type": "text", + "content": "Therefore, this paper aims to address the pivotal and forward-looking research question of \"how RAG systems can synergize with reasoning capabilities\". We systematically review current studies after 2024 while establishing explicit definitions for reasoning within RAG contexts. Building on this foundation, we provide an in-depth taxonomy and analysis of the objectives, typical patterns, and implementations underlying RAG-reasoning integration, clarifying key technological trajectories and critical breakthroughs." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 685, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 685, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 51, + 685, + 295, + 710 + ], + "type": "text", + "content": "As RAG technology enters its next developmental phase, downstream task complexity has escalated significantly -" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 327, + 560, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 327, + 560, + 458 + ], + "spans": [ + { + "bbox": [ + 313, + 327, + 560, + 458 + ], + "type": "text", + "content": "particularly evident in emerging challenges like Deep Research [106]. These advanced applications not only demand enhanced reasoning capacities but also drive RAG's expansion into multimodal, cross-domain, and dynamic environments. However, while the integration of reasoning capabilities demonstrably improves complex task performance, existing research frequently overlooks associated computational overheads and potential risks. Through systematic examination of these operational constraints and analysis of industry applications, we propose practical guidelines for multiple real-world scenarios with diverse requirements." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 459, + 559, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 459, + 559, + 542 + ], + "spans": [ + { + "bbox": [ + 313, + 459, + 559, + 542 + ], + "type": "text", + "content": "Finally, we outline future research directions grounded in current technological evolution, including: 1) RAG-graph architecture integration, 2) coordinated multimodal reasoning frameworks, 3) hybrid model collaboration, and 4) RL optimization specifically designed for RAG systems. This work establishes both theoretical foundations and practical roadmaps for subsequent research in this evolving field." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 543, + 558, + 564 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 543, + 558, + 564 + ], + "spans": [ + { + "bbox": [ + 314, + 543, + 558, + 564 + ], + "type": "text", + "content": "The contributions of this paper can be summarized as follows:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 331, + 574, + 567, + 718 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 331, + 574, + 559, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 574, + 559, + 633 + ], + "spans": [ + { + "bbox": [ + 331, + 574, + 559, + 633 + ], + "type": "text", + "content": "- Pioneering Review. This work represents the first comprehensive survey focusing on the integration of RAG with reasoning, offering novel insights and forward-looking guidance for advancing this emerging research frontier." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 331, + 634, + 567, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 634, + 567, + 693 + ], + "spans": [ + { + "bbox": [ + 331, + 634, + 567, + 693 + ], + "type": "text", + "content": "- Systematic Taxonomy. We present a multi-dimensional framework to systematically examine the objectives, paradigms, and methodologies for combining RAG with reasoning capabilities, establishing clear classification criteria across technical dimensions." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 331, + 694, + 559, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 694, + 559, + 718 + ], + "spans": [ + { + "bbox": [ + 331, + 694, + 559, + 718 + ], + "type": "text", + "content": "- Practical Guidance. 
Beyond theoretical exploration, we critically discuss the additional cost and potential" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "text", + "content": "Synergizing RAG and Reasoning: A Systematic Review" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 399, + 47, + 559, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 47, + 559, + 57 + ], + "spans": [ + { + "bbox": [ + 399, + 47, + 559, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 75, + 72, + 295, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 72, + 295, + 106 + ], + "spans": [ + { + "bbox": [ + 75, + 72, + 295, + 106 + ], + "type": "text", + "content": "risks associated with the introduction of reasoning, accompanied by an actionable Practical Guide for real-world scenarios." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 108, + 295, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 108, + 295, + 156 + ], + "spans": [ + { + "bbox": [ + 67, + 108, + 295, + 156 + ], + "type": "text", + "content": "- Open Resource Platform1 Through the OpenRAG platform, we provide a rich, multi-dimensional review of related work, which allows readers to quickly search and compare different methods." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 167, + 124, + 179 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 167, + 124, + 179 + ], + "spans": [ + { + "bbox": [ + 51, + 167, + 124, + 179 + ], + "type": "text", + "content": "2 Overview" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 183, + 295, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 183, + 295, + 256 + ], + "spans": [ + { + "bbox": [ + 50, + 183, + 295, + 256 + ], + "type": "text", + "content": "This chapter establishes a conceptual framework for the paper along two key dimensions. First, it formally defines \"reasoning\" and distinguishes it from \"inference.\" Second, it organizes a taxonomy of synergy mechanisms between \"RAG and Reasoning.\" To construct a clear cognitive pathway, we address three progressive research questions:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 259, + 294, + 295 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 67, + 259, + 237, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 259, + 237, + 270 + ], + "spans": [ + { + "bbox": [ + 67, + 259, + 237, + 270 + ], + "type": "text", + "content": "- Why synergize RAG and reasoning?" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 271, + 294, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 271, + 294, + 283 + ], + "spans": [ + { + "bbox": [ + 67, + 271, + 294, + 283 + ], + "type": "text", + "content": "- What are their typical collaboration paradigms?" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 283, + 242, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 283, + 242, + 295 + ], + "spans": [ + { + "bbox": [ + 67, + 283, + 242, + 295 + ], + "type": "text", + "content": "- How can this integration be realized?" 
+ } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 51, + 306, + 124, + 316 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 306, + 124, + 316 + ], + "spans": [ + { + "bbox": [ + 51, + 306, + 124, + 316 + ], + "type": "text", + "content": "2.1 Definition" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 321, + 301, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 321, + 301, + 487 + ], + "spans": [ + { + "bbox": [ + 50, + 321, + 301, + 487 + ], + "type": "text", + "content": "The definition of reasoning in modern AI systems remains an evolving construct, particularly within the context of LRMs exemplified by DeepSeek R1 and OpenAI O1. Here, under the scope of LLMs, we formalize reasoning as a structured, multi-step process that dynamically decomposes complex problems, generates intermediate hypotheses, and iteratively refines solutions through logical and evidence-based transformations. Mathematically, let a reasoning process " + }, + { + "bbox": [ + 50, + 321, + 301, + 487 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 50, + 321, + 301, + 487 + ], + "type": "text", + "content": " be defined as a tuple " + }, + { + "bbox": [ + 50, + 321, + 301, + 487 + ], + "type": "inline_equation", + "content": "\\langle \\mathcal{K}_p, \\mathcal{K}_r, S_t, \\Phi \\rangle" + }, + { + "bbox": [ + 50, + 321, + 301, + 487 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 50, + 321, + 301, + 487 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_p" + }, + { + "bbox": [ + 50, + 321, + 301, + 487 + ], + "type": "text", + "content": " denotes parametric knowledge embeddings, " + }, + { + "bbox": [ + 50, + 321, + 301, + 487 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_r" + }, + { + "bbox": [ + 50, + 321, + 301, + 487 + ], + "type": "text", + "content": " represents retrieved contextual knowledge, " + }, + { 
+ "bbox": [ + 50, + 321, + 301, + 487 + ], + "type": "inline_equation", + "content": "S_t = \\{s_0, s_1, \\ldots, s_n\\}" + }, + { + "bbox": [ + 50, + 321, + 301, + 487 + ], + "type": "text", + "content": " constitutes the evolving state sequence with " + }, + { + "bbox": [ + 50, + 321, + 301, + 487 + ], + "type": "inline_equation", + "content": "s_0" + }, + { + "bbox": [ + 50, + 321, + 301, + 487 + ], + "type": "text", + "content": " as the initial query and " + }, + { + "bbox": [ + 50, + 321, + 301, + 487 + ], + "type": "inline_equation", + "content": "s_n" + }, + { + "bbox": [ + 50, + 321, + 301, + 487 + ], + "type": "text", + "content": " as the final response, and " + }, + { + "bbox": [ + 50, + 321, + 301, + 487 + ], + "type": "inline_equation", + "content": "\\Phi : S_i \\times \\mathcal{K}_p \\times \\mathcal{K}_r \\to S_{i+1}" + }, + { + "bbox": [ + 50, + 321, + 301, + 487 + ], + "type": "text", + "content": " defines the state transition function." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 488, + 295, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 488, + 295, + 693 + ], + "spans": [ + { + "bbox": [ + 50, + 488, + 295, + 693 + ], + "type": "text", + "content": "The reasoning process exhibits three defining characteristics. First, it is inherently multi-step, systematically decomposing complex problems into intermediate cognitive states (e.g., sub-question generation or temporary conclusions) rather than pursuing direct input-output mapping. Second, it generates novel knowledge or facts – synthesizing implicit relationships, deriving latent constraints, or reformulating problems in ways not explicitly present in the initial input or parametric memory (e.g., transforming \"Is A greater than B?\" into comparative subquestions about A and B's attributes). Crucially, these representations are not merely retrieved but dynamically constructed through the reasoning trajectory. 
Third, the process is teleological – its architecture and termination conditions are explicitly optimized for complex problem resolution, where complexity is measured by the necessity of state transitions or the insufficiency of direct retrieval from either parametric " + }, + { + "bbox": [ + 50, + 488, + 295, + 693 + ], + "type": "inline_equation", + "content": "(\\mathcal{K}_p)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 72, + 559, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 559, + 108 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 559, + 108 + ], + "type": "text", + "content": "or external " + }, + { + "bbox": [ + 313, + 72, + 559, + 108 + ], + "type": "inline_equation", + "content": "(\\mathcal{K}_r)" + }, + { + "bbox": [ + 313, + 72, + 559, + 108 + ], + "type": "text", + "content": " knowledge sources. This stands in stark contrast to atomic inference, which lacks such deliberate state construction and goal-aware iteration." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 108, + 559, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 108, + 559, + 288 + ], + "spans": [ + { + "bbox": [ + 313, + 108, + 559, + 288 + ], + "type": "text", + "content": "The distinction between reasoning and inference manifests most saliently in their computational signatures. 
While inference " + }, + { + "bbox": [ + 313, + 108, + 559, + 288 + ], + "type": "inline_equation", + "content": "\\mathcal{I}" + }, + { + "bbox": [ + 313, + 108, + 559, + 288 + ], + "type": "text", + "content": " constitutes a single-step conditional probability computation " + }, + { + "bbox": [ + 313, + 108, + 559, + 288 + ], + "type": "inline_equation", + "content": "P(y|x) = \\prod_{t=1}^{T} P(y_t|x, y_{, ) to steer model behavior, tuning-based methods that inject domain-specific knowledge or distill reasoning capability, and RL-based frameworks that optimize retrieval-reasoning policies through outcome reward models (ORM) or process reward models (PRM). The alignment between these" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "text", + "content": "Synergizing RAG and Reasoning: A Systematic Review" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 399, + 47, + 559, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 47, + 559, + 57 + ], + "spans": [ + { + "bbox": [ + 399, + 47, + 559, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 72, + 295, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 72, + 295, + 133 + ], + "spans": [ + { + "bbox": [ + 51, + 72, + 295, + 133 + ], + "type": "text", + "content": "methodologies and the proposed taxonomy is critical—static workflows predominantly rely on predictable prompt-guided reasoning chains, whereas dynamic systems increasingly integrate search-based exploration or solver-augmented strategies to navigate evolving state spaces." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 133, + 295, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 133, + 295, + 217 + ], + "spans": [ + { + "bbox": [ + 50, + 133, + 295, + 217 + ], + "type": "text", + "content": "Overall, this tripartite taxonomy—motivational drivers, architectural paradigms, and implementation methodologies—establishes a unified lens for analyzing RAG+Reasoning systems. Subsequent chapters will elaborate on each stratum, progressively revealing how these conceptual distinctions translate into technical innovations that push the boundaries of machine intelligence." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 243, + 220, + 257 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 243, + 220, + 257 + ], + "spans": [ + { + "bbox": [ + 52, + 243, + 220, + 257 + ], + "type": "text", + "content": "3 The purpose of the synergy" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 258, + 295, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 258, + 295, + 377 + ], + "spans": [ + { + "bbox": [ + 50, + 258, + 295, + 377 + ], + "type": "text", + "content": "The integration of RAG and reasoning marks a crucial advancement in enhancing LLMs' problem-solving abilities. Their true potential lies not in isolated use but in their synergy, which overcomes key limitations in retrieval and reasoning. This section explains the main motivations for combining RAG with reasoning, emphasizing two primary benefits: (1) enhancing retrieval accuracy and flexibility through reasoning, and (2) reinforcing complex reasoning by using context-rich retrieved knowledge. Figure 4 illustrates these collaborative aims and the limitations they address." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 378, + 295, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 378, + 295, + 486 + ], + "spans": [ + { + "bbox": [ + 50, + 378, + 295, + 486 + ], + "type": "text", + "content": "The first key benefit is Reasoning-Augmented Retrieval where reasoning improves the retrieval process. Traditional RAG systems struggle with query formulation, relevance assessment, and iterative refinement—tasks needing logical and contextual analysis. Reasoning enables adaptive retrieval through dynamic query expansion, ambiguity resolution, and multi-hop evidence aggregation, overcoming the limits of keyword- or embedding-based methods and aligning retrieval with the task's reasoning demands." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 486, + 304, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 486, + 304, + 581 + ], + "spans": [ + { + "bbox": [ + 50, + 486, + 304, + 581 + ], + "type": "text", + "content": "The second benefit is Retrieval-Augmented Reasoning, where external knowledge supplements the limitations of purely parametric LLM reasoning. Even advanced models face hallucination, knowledge gaps, and compositional challenges alone. Retrieval grounds reasoning in up-to-date, domain-specific, or rare information absent from model weights, crucial for explainability, multi-step deduction, and integrating diverse sources." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 582, + 295, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 582, + 295, + 629 + ], + "spans": [ + { + "bbox": [ + 50, + 582, + 295, + 629 + ], + "type": "text", + "content": "Together, combining RAG and reasoning fills fundamental gaps in both techniques. By enhancing retrieval via reasoning and strengthening reasoning through retrieval, it broadens LLMs' capacity to address complex real-world problems." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 656, + 223, + 667 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 656, + 223, + 667 + ], + "spans": [ + { + "bbox": [ + 52, + 656, + 223, + 667 + ], + "type": "text", + "content": "3.1 Reasoning-Augmented Retrieval" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 52, + 670, + 294, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 670, + 294, + 718 + ], + "spans": [ + { + "bbox": [ + 52, + 670, + 294, + 718 + ], + "type": "text", + "content": "Reasoning-Augmented Retrieval (RAR) represents a significant advancement in information retrieval by integrating multi-step reasoning to dynamically enhance retrieval quality. Unlike traditional methods that depend on static semantic" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 317, + 72, + 558, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 72, + 558, + 108 + ], + "spans": [ + { + "bbox": [ + 317, + 72, + 558, + 108 + ], + "type": "text", + "content": "matching, RAR creates a cognitive feedback loop mimicking human iterative reasoning, surpassing the limitations of simple \"query-document\" interactions." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 108, + 559, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 108, + 559, + 312 + ], + "spans": [ + { + "bbox": [ + 313, + 108, + 559, + 312 + ], + "type": "text", + "content": "RAR's effectiveness stems from several key features. It often uses on-demand retrieval, where reasoning-evaluating intent clarity, knowledge state, and temporal factors-guides adaptive search initiation, reducing redundancies present in fixed triggers (e.g., UAR's classifier [14]). 
It improves semantic alignment by inferring implicit query logic such as business rules or entity relationships to generate precise retrieval requests aligned with data schemas (e.g., PlanRAG's plan-retrieval loops [48]). RAR also applies multi-step iterative refinement, using intermediate reasoning outputs (e.g., chain-of-thought, partial answers [78]) to recursively reformulate queries in a closed-loop system essential for resolving multi-hop dependencies [68]. Furthermore, it adapts to specific domains by tailoring retrieval to vertical contexts (e.g., financial or medical) and balances efficiency and precision through lightweight reasoning strategies (e.g., AdaptiveRAG's complexity-based selection [41])." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 317, + 312, + 558, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 312, + 558, + 382 + ], + "spans": [ + { + "bbox": [ + 317, + 312, + 558, + 382 + ], + "type": "text", + "content": "Traditional retrieval systems, effective for simple queries, struggle with complex information needs due to rigid designs favoring static matching over dynamic reasoning, limiting their adaptability to changing contexts and diverse data. RAR primarily addresses five core challenges inherent in these conventional methods." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 389, + 559, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 389, + 559, + 616 + ], + "spans": [ + { + "bbox": [ + 313, + 389, + 559, + 616 + ], + "type": "text", + "content": "3.1.1 Semantic Disparities Between Queries and Documents. A key challenge lies in the mismatch between user queries and documents—whether due to differing expression styles (professional jargon vs. casual language) or implicit contextual gaps—making direct semantic matching unreliable. 
Importantly, high similarity does not guarantee true relevance, as documents may share keywords or surface features without addressing the underlying intent or logic of the query. Retrieval models must therefore understand deeper semantics beyond superficial similarity. Domain adaptation further complicates this issue. To overcome these gaps, approaches such as reasoning-augmented embeddings (O1-Embedder [101] enriches queries with inferred \"thinking\" text), feedback-driven rewriting (SmartRAG [20] dynamically refines queries based on retrieved results), and preplanning (PlanRAG [48] extracts business rules to generate SQL queries aligned with database schemas) help better capture domain-specific semantics and ensure relevance beyond mere similarity." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 317, + 622, + 558, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 622, + 558, + 718 + ], + "spans": [ + { + "bbox": [ + 317, + 622, + 558, + 718 + ], + "type": "text", + "content": "3.1.2 Inflexible Intent Disambiguation. Traditional RAG methods rely on fixed embedding similarity strategies, which fail to dynamically interpret the implicit intent behind complex queries (e.g., multi-hop reasoning or domain-specific requirements). 
User queries often exhibit semantic complexity that far exceeds their surface text—for instance, a request to \"optimize supply chain costs\" may require correlating disparate database fields not explicitly" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "spans": [ + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "type": "text", + "content": "Gao et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 83, + 79, + 110 + ], + "blocks": [ + { + "bbox": [ + 60, + 83, + 79, + 110 + ], + "lines": [ + { + "bbox": [ + 60, + 83, + 79, + 110 + ], + "spans": [ + { + "bbox": [ + 60, + 83, + 79, + 110 + ], + "type": "image", + "image_path": "09571464f1bd88bbbe376dc373e70dd2c58a83763d1dbb94f0dcc2d042a01304.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 167, + 436, + 443, + 449 + ], + "lines": [ + { + "bbox": [ + 167, + 436, + 443, + 449 + ], + "spans": [ + { + "bbox": [ + 167, + 436, + 443, + 449 + ], + "type": "text", + "content": "Figure 4. 
The purpose of the synergy between RAG and reasoning" + } + ] + } + ], + "index": 60, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 80, + 90, + 153, + 110 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 90, + 153, + 110 + ], + "spans": [ + { + "bbox": [ + 80, + 90, + 153, + 110 + ], + "type": "text", + "content": "Core Limitations in RAG" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 192, + 92, + 277, + 102 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 192, + 92, + 277, + 102 + ], + "spans": [ + { + "bbox": [ + 192, + 92, + 277, + 102 + ], + "type": "text", + "content": "Semantic Disparities" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 184, + 106, + 268, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 106, + 268, + 114 + ], + "spans": [ + { + "bbox": [ + 184, + 106, + 268, + 114 + ], + "type": "text", + "content": "Lexical and contextual disparitie" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 184, + 114, + 286, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 114, + 286, + 121 + ], + "spans": [ + { + "bbox": [ + 184, + 114, + 286, + 121 + ], + "type": "text", + "content": "(e.g., terminology mismatch, implicit" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 184, + 121, + 230, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 184, + 121, + 230, + 128 + ], + "spans": [ + { + "bbox": [ + 184, + 121, + 230, + 128 + ], + "type": "text", + "content": "context absence)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 186, + 130, + 284, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 130, + 284, + 137 + ], + "spans": [ + { + "bbox": [ + 186, + 130, + 284, + 137 + ], + "type": "text", + "content": "Failure of semantic similarity matching" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 334, + 92, + 403, + 102 + ], + "type": 
"title", + "angle": 0, + "lines": [ + { + "bbox": [ + 334, + 92, + 403, + 102 + ], + "spans": [ + { + "bbox": [ + 334, + 92, + 403, + 102 + ], + "type": "text", + "content": "Knowledge Gaps" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 319, + 105, + 418, + 110 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 105, + 418, + 110 + ], + "spans": [ + { + "bbox": [ + 319, + 105, + 418, + 110 + ], + "type": "text", + "content": "Long-range reasoning tasks(e.g., multi-" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 319, + 111, + 340, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 111, + 340, + 118 + ], + "spans": [ + { + "bbox": [ + 319, + 111, + 340, + 118 + ], + "type": "text", + "content": "hop QA)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 318, + 121, + 419, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 121, + 419, + 126 + ], + "spans": [ + { + "bbox": [ + 318, + 121, + 419, + 126 + ], + "type": "text", + "content": "Requiring logical integration across" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 318, + 127, + 394, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 127, + 394, + 133 + ], + "spans": [ + { + "bbox": [ + 318, + 127, + 394, + 133 + ], + "type": "text", + "content": "multiple knowledge segments" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 318, + 135, + 419, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 135, + 419, + 140 + ], + "spans": [ + { + "bbox": [ + 318, + 135, + 419, + 140 + ], + "type": "text", + "content": "Absence of intermediate knowledge" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 318, + 141, + 419, + 147 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 141, + 419, + 147 + ], + "spans": [ + { + "bbox": [ + 318, + 141, + 419, + 147 + ], + "type": "text", + "content": "leads to reasoning chain 
fragmentation" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 456, + 91, + 529, + 100 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 456, + 91, + 529, + 100 + ], + "spans": [ + { + "bbox": [ + 456, + 91, + 529, + 100 + ], + "type": "text", + "content": "Core Limitations" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 463, + 102, + 521, + 112 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 463, + 102, + 521, + 112 + ], + "spans": [ + { + "bbox": [ + 463, + 102, + 521, + 112 + ], + "type": "text", + "content": "in Reasoning" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 529, + 87, + 548, + 111 + ], + "blocks": [ + { + "bbox": [ + 529, + 87, + 548, + 111 + ], + "lines": [ + { + "bbox": [ + 529, + 87, + 548, + 111 + ], + "spans": [ + { + "bbox": [ + 529, + 87, + 548, + 111 + ], + "type": "image", + "image_path": "134e9289b64c48e173041598397d78a2ffff93d776484a12645311f697b63f1f.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "bbox": [ + 112, + 148, + 176, + 165 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 148, + 176, + 165 + ], + "spans": [ + { + "bbox": [ + 112, + 148, + 176, + 165 + ], + "type": "text", + "content": "Inflexible Intent Disambiguation" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 94, + 170, + 195, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 170, + 195, + 175 + ], + "spans": [ + { + "bbox": [ + 94, + 170, + 195, + 175 + ], + "type": "text", + "content": "Failure to resolve implicit intents in" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 94, + 176, + 137, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 176, + 137, + 182 + ], + "spans": [ + { + "bbox": [ + 94, + 176, + 137, + 182 + ], + "type": "text", + "content": "complex queries" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 94, + 184, + 
194, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 184, + 194, + 190 + ], + "spans": [ + { + "bbox": [ + 94, + 184, + 194, + 190 + ], + "type": "text", + "content": "(e.g., multi-hop reasoning, domain-" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 94, + 190, + 151, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 190, + 151, + 196 + ], + "spans": [ + { + "bbox": [ + 94, + 190, + 151, + 196 + ], + "type": "text", + "content": "specific requirements)" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 94, + 198, + 194, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 198, + 194, + 204 + ], + "spans": [ + { + "bbox": [ + 94, + 198, + 194, + 204 + ], + "type": "text", + "content": "The semantic complexity of user queries" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 94, + 205, + 175, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 205, + 175, + 210 + ], + "spans": [ + { + "bbox": [ + 94, + 205, + 175, + 210 + ], + "type": "text", + "content": "may far exceed their surface text" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 63, + 224, + 205, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 224, + 205, + 232 + ], + "spans": [ + { + "bbox": [ + 63, + 224, + 205, + 232 + ], + "type": "text", + "content": "Heterogeneous Data Collaboration" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 77, + 239, + 158, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 239, + 158, + 244 + ], + "spans": [ + { + "bbox": [ + 77, + 239, + 158, + 244 + ], + "type": "text", + "content": "Schema-disparate data sources" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 77, + 247, + 192, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 247, + 192, + 259 + ], + "spans": [ + { + "bbox": [ + 77, + 247, + 192, + 259 + ], + "type": "text", + 
"content": "(e.g., structured records vs. unstructured passages)" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 77, + 262, + 192, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 262, + 192, + 268 + ], + "spans": [ + { + "bbox": [ + 77, + 262, + 192, + 268 + ], + "type": "text", + "content": "Requires cross-modal retrieval and alignment" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 108, + 281, + 203, + 290 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 281, + 203, + 290 + ], + "spans": [ + { + "bbox": [ + 108, + 281, + 203, + 290 + ], + "type": "text", + "content": "Efficiency vs. Precision" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 105, + 295, + 203, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 295, + 203, + 301 + ], + "spans": [ + { + "bbox": [ + 105, + 295, + 203, + 301 + ], + "type": "text", + "content": "Comprehensive Retrieval " + }, + { + "bbox": [ + 105, + 295, + 203, + 301 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 105, + 295, + 203, + 301 + ], + "type": "text", + "content": " Overhead" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 105, + 304, + 203, + 311 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 203, + 311 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 203, + 311 + ], + "type": "text", + "content": "Restricted Retrieval " + }, + { + "bbox": [ + 105, + 304, + 203, + 311 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 105, + 304, + 203, + 311 + ], + "type": "text", + "content": " Critical info loss" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 105, + 315, + 196, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 315, + 196, + 321 + ], + "spans": [ + { + "bbox": [ + 105, + 315, + 196, + 321 + ], + "type": "text", + "content": "Iterations " + }, + { + "bbox": [ + 105, + 
315, + 196, + 321 + ], + "type": "inline_equation", + "content": "\\uparrow \\rightarrow" + }, + { + "bbox": [ + 105, + 315, + 196, + 321 + ], + "type": "text", + "content": " Computational costs" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 105, + 324, + 201, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 324, + 201, + 331 + ], + "spans": [ + { + "bbox": [ + 105, + 324, + 201, + 331 + ], + "type": "text", + "content": "Lack of dynamic trade-off mechanism" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 209, + 162, + 365, + 194 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 162, + 365, + 194 + ], + "spans": [ + { + "bbox": [ + 209, + 162, + 365, + 194 + ], + "type": "text", + "content": "Reasoning Augmented Retrieval" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 290, + 215, + 321, + 228 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 290, + 215, + 321, + 228 + ], + "spans": [ + { + "bbox": [ + 290, + 215, + 321, + 228 + ], + "type": "text", + "content": "RAG" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 268, + 258, + 338, + 273 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 268, + 258, + 338, + 273 + ], + "spans": [ + { + "bbox": [ + 268, + 258, + 338, + 273 + ], + "type": "text", + "content": "Reasoning" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 303, + 285, + 362, + 298 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 285, + 362, + 298 + ], + "spans": [ + { + "bbox": [ + 303, + 285, + 362, + 298 + ], + "type": "text", + "content": "Retrieval" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 258, + 301, + 405, + 316 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 301, + 405, + 316 + ], + "spans": [ + { + "bbox": [ + 258, + 301, + 405, + 316 + ], + "type": "text", + "content": "Augmented Reasoning" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 297, + 
342, + 395, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 297, + 342, + 395, + 361 + ], + "spans": [ + { + "bbox": [ + 297, + 342, + 395, + 361 + ], + "type": "text", + "content": "Search Space Explosion & Local Optima Traps" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 296, + 365, + 396, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 365, + 396, + 376 + ], + "spans": [ + { + "bbox": [ + 296, + 365, + 396, + 376 + ], + "type": "text", + "content": "Search space grows exponentially with reasoning steps" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 296, + 379, + 396, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 379, + 396, + 391 + ], + "spans": [ + { + "bbox": [ + 296, + 379, + 396, + 391 + ], + "type": "text", + "content": "Traditional multi-step reasoning methods lack external knowledge constraints" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 296, + 394, + 396, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 394, + 396, + 399 + ], + "spans": [ + { + "bbox": [ + 296, + 394, + 396, + 399 + ], + "type": "text", + "content": "Lead to invalid hypotheses, local optima" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 296, + 400, + 375, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 400, + 375, + 405 + ], + "spans": [ + { + "bbox": [ + 296, + 400, + 375, + 405 + ], + "type": "text", + "content": "traps, or logical inconsistencies" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 409, + 156, + 522, + 165 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 409, + 156, + 522, + 165 + ], + "spans": [ + { + "bbox": [ + 409, + 156, + 522, + 165 + ], + "type": "text", + "content": "Domain Knowledge Boudary" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 414, + 170, + 515, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 414, + 
170, + 515, + 183 + ], + "spans": [ + { + "bbox": [ + 414, + 170, + 515, + 183 + ], + "type": "text", + "content": "Pre-trained models exhibit constrained knowledge coverage" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 414, + 186, + 514, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 414, + 186, + 514, + 198 + ], + "spans": [ + { + "bbox": [ + 414, + 186, + 514, + 198 + ], + "type": "text", + "content": "Struggle with tasks requiring domain-specific expertise" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 414, + 199, + 487, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 414, + 199, + 487, + 205 + ], + "spans": [ + { + "bbox": [ + 414, + 199, + 487, + 205 + ], + "type": "text", + "content": "(e.g., semiconductor design)" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 415, + 208, + 515, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 415, + 208, + 515, + 220 + ], + "spans": [ + { + "bbox": [ + 415, + 208, + 515, + 220 + ], + "type": "text", + "content": "Processing tasks requiring real-time information is challenging" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 444, + 229, + 525, + 247 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 444, + 229, + 525, + 247 + ], + "spans": [ + { + "bbox": [ + 444, + 229, + 525, + 247 + ], + "type": "text", + "content": "Dynamic Knowledge Requirements" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 425, + 251, + 544, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 425, + 251, + 544, + 258 + ], + "spans": [ + { + "bbox": [ + 425, + 251, + 544, + 258 + ], + "type": "text", + "content": "Progressively evolving knowledge requirements" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 427, + 262, + 543, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 427, + 262, + 543, + 273 + ], + "spans": [ + { + "bbox": [ + 427, + 262, + 543, + 273 + ], + 
"type": "text", + "content": "Initial retrieval results are irrelevant or redundant to subsequent reasoning steps" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 427, + 275, + 543, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 427, + 275, + 543, + 288 + ], + "spans": [ + { + "bbox": [ + 427, + 275, + 543, + 288 + ], + "type": "text", + "content": "Dynamically evolving information needs in complex reasoning tasks" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 427, + 290, + 542, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 427, + 290, + 542, + 302 + ], + "spans": [ + { + "bbox": [ + 427, + 290, + 542, + 302 + ], + "type": "text", + "content": "Fixed retrieval strategies struggle to achieve real-time matching" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 410, + 314, + 525, + 323 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 410, + 314, + 525, + 323 + ], + "spans": [ + { + "bbox": [ + 410, + 314, + 525, + 323 + ], + "type": "text", + "content": "Insufficient Depth & Breadth" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 417, + 329, + 518, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 417, + 329, + 518, + 335 + ], + "spans": [ + { + "bbox": [ + 417, + 329, + 518, + 335 + ], + "type": "text", + "content": "The inherent static knowledge of LLMs" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 417, + 338, + 518, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 417, + 338, + 518, + 350 + ], + "spans": [ + { + "bbox": [ + 417, + 338, + 518, + 350 + ], + "type": "text", + "content": "Challenge of covering dynamically evolving domain knowledge boundaries" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 417, + 354, + 518, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 417, + 354, + 518, + 366 + ], + "spans": [ + { + "bbox": [ + 417, + 354, + 518, + 366 + ], + "type": "text", + 
"content": "The reasoning chains frequently terminate at superficial associations" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 417, + 368, + 517, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 417, + 368, + 517, + 380 + ], + "spans": [ + { + "bbox": [ + 417, + 368, + 517, + 380 + ], + "type": "text", + "content": "The inability to establish cross-domain, multi-level knowledge connections" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 50, + 464, + 296, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 464, + 296, + 644 + ], + "spans": [ + { + "bbox": [ + 50, + 464, + 296, + 644 + ], + "type": "text", + "content": "mentioned. Static retrieval methods lack the adaptability to capture such dynamically evolving information needs. A critical limitation lies in intent dynamicity: as contextual understanding expands, traditional systems generate fixed retrieval results based solely on the initial query. Furthermore, semantic representation limitations of dense retrieval models (e.g., BERT-based models) hinder their ability to encode intricate semantic relationships (e.g., irony, metaphors), leading to misaligned results. Current approaches attempt to mitigate these issues through multi-step intent decomposition (e.g., LevelRAG's high-level searcher breaks complex queries into multi-hop sub-queries [103]) and dynamic query reformulation (e.g., LeReT's reinforcement learning generates diversified query candidates [34]), iteratively refining retrieval strategies to align with document content." + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 50, + 658, + 296, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 658, + 296, + 718 + ], + "spans": [ + { + "bbox": [ + 50, + 658, + 296, + 718 + ], + "type": "text", + "content": "3.1.3 Inefficient Coordination of Multi-Source Heterogeneous Data. 
Retrieval from diverse sources—text, tables, graphs, web, and APIs—often produces fragmented results due to a lack of global reasoning. The key challenge is modal heterogeneity: different retrieval techniques (dense" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 313, + 464, + 569, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 464, + 569, + 681 + ], + "spans": [ + { + "bbox": [ + 313, + 464, + 569, + 681 + ], + "type": "text", + "content": "retrieval for text, SQL for tables, GQL for graphs) operate independently without unified coordination. For example, experiments show standard RAG methods (like dense retrieval with query decomposition) yield only " + }, + { + "bbox": [ + 313, + 464, + 569, + 681 + ], + "type": "inline_equation", + "content": "32.7\\%" + }, + { + "bbox": [ + 313, + 464, + 569, + 681 + ], + "type": "text", + "content": " perfect recall and " + }, + { + "bbox": [ + 313, + 464, + 569, + 681 + ], + "type": "inline_equation", + "content": "40.9\\%" + }, + { + "bbox": [ + 313, + 464, + 569, + 681 + ], + "type": "text", + "content": " F1 on the OTT-QA dataset. These outcomes reveal the limitations of traditional approaches in aligning textual queries with structured tables—such as failing to link concepts like \"K-12 student free rates\" in text to related \"education expenditure\" columns when not explicitly mentioned. Additionally, disconnected entity matching (e.g., relating \"company revenue\" in text to financial tables) worsens inefficiencies, as conventional methods depend on semantic similarity and overlook domain-specific relationships and exact-value matches. Advanced techniques—such as reasoning-driven alignment (ARM's N-gram constraints for cross-modal entity decoding [7]) and unified semantic spaces (LevelRAG's shared multi-modal representations [103])—enable more effective, integrated retrieval." 
+ } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 314, + 694, + 559, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 694, + 559, + 718 + ], + "spans": [ + { + "bbox": [ + 314, + 694, + 559, + 718 + ], + "type": "text", + "content": "3.1.4 Incompleteness and Incoherence in Complex Retrieval Tasks. Single-step retrieval systems fall short in" + } + ] + } + ], + "index": 64 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "text", + "content": "Synergizing RAG and Reasoning: A Systematic Review" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 399, + 47, + 559, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 47, + 559, + 57 + ], + "spans": [ + { + "bbox": [ + 399, + 47, + 559, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 72, + 295, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 72, + 295, + 204 + ], + "spans": [ + { + "bbox": [ + 50, + 72, + 295, + 204 + ], + "type": "text", + "content": "complex multi-hop reasoning tasks, such as deducing entity chains or conducting decision analysis. 
Traditional static retrieval conflicts with multi-step cognitive needs, resulting in three main issues: 1) Path dependency, where later retrievals rely on information from earlier steps (e.g., finding \"the most populous county in California\" before its education policies), but conventional systems lack state management; 2) Error propagation, early retrieval errors cause mistakes in intermediate results, which then affect the next round of retrieval; 3) Semantic inflexibility of fixed queries, which cannot adapt to dynamic concepts like entity aliases or relational predicates." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 204, + 301, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 204, + 301, + 289 + ], + "spans": [ + { + "bbox": [ + 50, + 204, + 301, + 289 + ], + "type": "text", + "content": "Advanced methods address these flaws through integrated strategies. PlanRAG uses iterative \"plan-retrospect-replan\" cycles to trigger sub-queries when gaps arise. Reinforcement learning in LeReT improves query generation via reward-driven path selection. Likewise, ITER-RETGEN rebuilds follow-up queries using intermediate answers (e.g., \"award recipient's height\") to resolve multi-hop dependencies." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 303, + 295, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 303, + 295, + 445 + ], + "spans": [ + { + "bbox": [ + 50, + 303, + 295, + 445 + ], + "type": "text", + "content": "3.1.5 Trade-offs Between Retrieval Efficiency and Precision. Complex scenarios face a tension between exhaustive retrieval, which is computationally costly, and restricted retrieval, which risks information loss. Expanding retrieval blindly inflates costs (e.g., LLM API calls) without ensuring relevance. Simple queries suffer from unnecessary multi-step retrieval, wasting resources, while complex queries face quality risks if retrieval is too limited. 
Adaptive approaches like complexity-aware routing (Adaptive-RAG's lightweight classifier allocates retrieval budgets [41]) and cost-sensitive training (SmartRAG's reinforcement learning balances quality and steps [20]) dynamically manage this trade-off." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 446, + 295, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 446, + 295, + 554 + ], + "spans": [ + { + "bbox": [ + 50, + 446, + 295, + 554 + ], + "type": "text", + "content": "In summary, Reasoning-Augmented Retrieval overcomes traditional RAG's limitations in dynamic triggering, semantic alignment, multi-hop support, domain adaptation, and efficiency trade-offs by deeply integrating reasoning into the retrieval process. Its key innovation is a bidirectional enhancement between reasoning and retrieval—reasoning refines retrieval strategies, while retrieval supports iterative reasoning—jointly boosting accuracy and efficiency in complex information tasks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 571, + 224, + 583 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 571, + 224, + 583 + ], + "spans": [ + { + "bbox": [ + 51, + 571, + 224, + 583 + ], + "type": "text", + "content": "3.2 Retrieval-Augmented Reasoning" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 586, + 295, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 586, + 295, + 718 + ], + "spans": [ + { + "bbox": [ + 50, + 586, + 295, + 718 + ], + "type": "text", + "content": "Retrieval-Augmented Reasoning (ReAR) combines external knowledge retrieval with inherent model reasoning to overcome failures from knowledge gaps or logical discontinuities in complex tasks. Unlike traditional RAG methods that retrieve information once, ReAR uses an iterative, context-sensitive retrieval that continuously provides relevant data to support multi-step reasoning. 
This approach is crucial for tasks needing strict logic, such as mathematical proofs, where intermediate steps require specific theorems or lemmas. By making retrieval an adaptive, ongoing process rather than a one-time step, ReAR strengthens each reasoning stage" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 72, + 558, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 558, + 95 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 558, + 95 + ], + "type": "text", + "content": "with accurate, current information, improving the overall inference's reliability and robustness." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 96, + 559, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 96, + 559, + 263 + ], + "spans": [ + { + "bbox": [ + 313, + 96, + 559, + 263 + ], + "type": "text", + "content": "ReAR's core feature is dynamic knowledge supplementation, generating retrieval queries in real-time based on the evolving reasoning context. This overcomes the limits of single-round retrieval by enabling knowledge refinement at each step, as seen in process supervision frameworks like RAG-Gym [96]. ReAR also improves reasoning paths using methods like search space compression—for example, MCTS-guided heuristics in KBQA—and structured feedback from diverse sources like knowledge graphs [97]. These techniques maintain logical consistency while reducing irrelevant or conflicting information. Importantly, ReAR adapts well across domains, supporting precise knowledge retrieval and tool use for specialized tasks such as industrial problem-solving in PIKE [82] or scientific reasoning [106]." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 264, + 559, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 264, + 559, + 360 + ], + "spans": [ + { + "bbox": [ + 313, + 264, + 559, + 360 + ], + "type": "text", + "content": "By integrating retrieval as an active part of the reasoning loop, ReAR addresses LLMs' temporal and depth constraints, ensuring adherence to domain-specific and time-sensitive requirements. This close coupling turns external knowledge into an on-demand resource, creating a closed-loop system that enhances the model's ability to handle complex, knowledge-intensive problems. Specifically, ReAR seeks to address the following limitations and challenges:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 365, + 563, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 365, + 563, + 556 + ], + "spans": [ + { + "bbox": [ + 313, + 365, + 563, + 556 + ], + "type": "text", + "content": "3.2.1 Knowledge Gap in Multi-step Reasoning. In long-range reasoning, missing intermediate knowledge often breaks logical chains, especially in industrial and scientific contexts requiring multi-source data integration (e.g., text, tables, time-series). Static retrieval methods worsen this by not adapting to the reasoning process's changing needs. ReAR techniques address this with chained retrieval, as in CoRAG [83], which breaks multi-hop questions into sequential sub-queries (e.g., retrieving \"event causes\" then their \"impacts\"), systematically linking knowledge. Reasoning-state-aware retrieval, used in FLARE [45], predicts future information needs by generating interim prompts (e.g., \"the next step requires discussion of ...\"), enabling dynamic query construction that preserves coherence. Together, these approaches resolve the conflict between discrete retrieval and continuous reasoning." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 563, + 559, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 563, + 559, + 669 + ], + "spans": [ + { + "bbox": [ + 313, + 563, + 559, + 669 + ], + "type": "text", + "content": "3.2.2 Reasoning Discontinuity Caused by Domain Knowledge Boundaries. Reasoning discontinuity arises from LLMs' limited knowledge, struggling with specialized domains (e.g., semiconductor design in PIKE [82]) and real-time data (e.g., medical parameters in Agentic Reasoning [92]). End-to-end models often produce factual errors, while traditional RAG methods fail to retrieve deep professional knowledge due to coarse retrieval, especially with complex data like tables, charts and images." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 670, + 559, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 670, + 559, + 718 + ], + "spans": [ + { + "bbox": [ + 313, + 670, + 559, + 718 + ], + "type": "text", + "content": "ReAR addresses this with two complementary solutions: knowledge atomization and structural organization, as in PIKE's decomposition of documents into fine-grained units and multi-layer knowledge graphs for semantic and logical" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "spans": [ + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "type": "text", + "content": "Gao et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 72, + 294, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 72, + 294, + 144 + ], + "spans": [ + { + "bbox": [ + 50, + 72, + 294, + 144 + ], + "type": "text", + "content": "retrieval; and dynamic tool integration, as in Agentic Reasoning's real-time data acquisition via code execution and API calls to compute critical indicators (e.g., medical FiO2). These innovations overcome the challenges of specialized knowledge depth and timely information relevance that limit conventional methods." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 156, + 295, + 167 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 156, + 295, + 167 + ], + "spans": [ + { + "bbox": [ + 50, + 156, + 295, + 167 + ], + "type": "text", + "content": "3.2.3 Search Space Explosion and Local Optima Traps." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 168, + 295, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 168, + 295, + 335 + ], + "spans": [ + { + "bbox": [ + 50, + 168, + 295, + 335 + ], + "type": "text", + "content": "The main challenge in multi-step reasoning is the exponential growth of the search space, where methods like Chain-of-Thought (CoT) often yield suboptimal or inconsistent results due to unconstrained hypotheses. Traditional approaches like CoT and Tree-of-Thought (ToT) lack external knowledge constraints, causing invalid assumptions, while purely symbolic reasoning falls short in open-domain tasks. To address this, two strategies are used: knowledge base-anchored heuristic search (KBQA-O1 [58]), which limits reasoning actions to subgraphs in knowledge graphs, and a retrieval-verification mechanism (Search-o1 [51]) that prunes unsupported reasoning paths using evidence from the knowledge base. 
Together, these reduce the search space and preserve reasoning coherence." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 347, + 295, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 347, + 295, + 611 + ], + "spans": [ + { + "bbox": [ + 50, + 347, + 295, + 611 + ], + "type": "text", + "content": "3.2.4 Dynamic Knowledge Requirements in Multi-Step Reasoning. Complex multi-step reasoning tasks face the challenge of continuously changing knowledge requirements. This is evident in cases like multi-hop reasoning and engineering planning, where each stage generates new sub-problems (e.g., moving from \"architectural design\" to \"material cost estimation\"). Static knowledge bases or one-time retrieval methods cannot meet this evolving demand. This manifests in two ways: initial knowledge may miss later needs, causing gaps; and fixed knowledge sets may include irrelevant information, reducing reasoning accuracy. To address this, new retrieval-augmented reasoning approaches introduce dynamic solutions: process supervision (e.g., reward models in RAG-Gym [96]) detects knowledge gaps in real time, atomic decision-making (e.g., step decomposition in DeepRAG [24]) triggers retrieval as needed, and tree-like expansions (e.g., multi-path retrieval in DeepSolution [54]) enable parallel exploration. By integrating knowledge retrieval within reasoning, these methods let the system identify, supplement, and verify knowledge dynamically—much like a human expert—greatly enhancing the reliability and completeness of complex reasoning." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 622, + 295, + 634 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 622, + 295, + 634 + ], + "spans": [ + { + "bbox": [ + 50, + 622, + 295, + 634 + ], + "type": "text", + "content": "3.2.5 Insufficient Depth and Breadth of Reasoning." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 635, + 295, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 635, + 295, + 718 + ], + "spans": [ + { + "bbox": [ + 50, + 635, + 295, + 718 + ], + "type": "text", + "content": "This issue is prominent in expert tasks like medical diagnosis, legal analysis, and research report generation. LLMs' static knowledge often fails to capture the evolving scope of domain knowledge, resulting in shallow reasoning that misses multi-level, cross-domain connections. For example, when assessing \"Company A is affected by economic recession,\" traditional methods rely on superficial statistical" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 72, + 559, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 559, + 120 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 559, + 120 + ], + "type": "text", + "content": "patterns and cannot systematically follow the deeper logical chain from \"Company A " + }, + { + "bbox": [ + 313, + 72, + 559, + 120 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 313, + 72, + 559, + 120 + ], + "type": "text", + "content": " industry supply chain " + }, + { + "bbox": [ + 313, + 72, + 559, + 120 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 313, + 72, + 559, + 120 + ], + "type": "text", + "content": " macroeconomic policy " + }, + { + "bbox": [ + 313, + 72, + 559, + 120 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 313, + 72, + 559, + 120 + ], + "type": "text", + "content": " international political landscape,\" leading to reasoning that lacks causal depth." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 121, + 559, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 121, + 559, + 275 + ], + "spans": [ + { + "bbox": [ + 313, + 121, + 559, + 275 + ], + "type": "text", + "content": "To overcome this, recent advances use structured, retrieval-enhanced frameworks. ToG2.0 [60] models Knowledge Graph relational paths as retrieval guidance vectors, enabling targeted queries along entity paths, surpassing the limits of keyword-based retrieval. This approach complements CR-Planner's [52] iterative expansion, which triggers retrieval of specialized knowledge (e.g., textbook proofs of algorithm complexity) at critical reasoning points, ensuring accurate domain knowledge integration via multi-round validation. Addressing cross-domain knowledge linkage, CO-STORM [43] employs a multi-agent system whose host module generates cross-modal retrieval commands by analyzing potential semantics in uncited documents." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 291, + 442, + 305 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 291, + 442, + 305 + ], + "spans": [ + { + "bbox": [ + 314, + 291, + 442, + 305 + ], + "type": "text", + "content": "4 Patterns of synergy" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 307, + 559, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 307, + 559, + 427 + ], + "spans": [ + { + "bbox": [ + 313, + 307, + 559, + 427 + ], + "type": "text", + "content": "Section 3 detailed the need and motivation for integrating RAG with reasoning. Building on this, this section presents two core implementation patterns for RAG-reasoning synergy (Figure 5): (1) the Pre-defined Workflow, which uses logical architectures with preset rules for coordination, and (2) Dynamic Workflow, which relies on context-aware, adaptive coordination via real-time decision engines. 
These patterns illustrate current frameworks combining knowledge retrieval and multi-step reasoning from deterministic and flexible perspectives." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 441, + 440, + 451 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 441, + 440, + 451 + ], + "spans": [ + { + "bbox": [ + 314, + 441, + 440, + 451 + ], + "type": "text", + "content": "4.1 Pre-defined workflow" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 456, + 559, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 456, + 559, + 622 + ], + "spans": [ + { + "bbox": [ + 313, + 456, + 559, + 622 + ], + "type": "text", + "content": "Pre-defined workflow is a multi-step reasoning approach with a fixed architecture and sequential execution, emphasizing process clarity and operational determinism. It consists of predefined iterative stages, each with strict input-output rules and no dynamic changes based on intermediate results. This modular design ensures controllability and structured reasoning for complex tasks. All steps are executed regardless of intermediate outcomes, guaranteeing repeatability and stability while avoiding uncertainties from dynamic decisions. Although it sacrifices adaptability, this approach offers procedural predictability and is well-suited for scenarios demanding clear reasoning paths, albeit with possible computational redundancy due to lack of real-time adjustments." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 623, + 559, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 623, + 559, + 683 + ], + "spans": [ + { + "bbox": [ + 313, + 623, + 559, + 683 + ], + "type": "text", + "content": "Mathematically, the pre-defined RAG workflow can be formalized as a deterministic multi-step operational chain. 
Given an input query " + }, + { + "bbox": [ + 313, + 623, + 559, + 683 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 313, + 623, + 559, + 683 + ], + "type": "text", + "content": " and a predefined sequence of " + }, + { + "bbox": [ + 313, + 623, + 559, + 683 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 623, + 559, + 683 + ], + "type": "text", + "content": " reasoning steps and the final decision output " + }, + { + "bbox": [ + 313, + 623, + 559, + 683 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 313, + 623, + 559, + 683 + ], + "type": "text", + "content": ", the complete workflow is expressed as:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 385, + 706, + 559, + 719 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 385, + 706, + 559, + 719 + ], + "spans": [ + { + "bbox": [ + 385, + 706, + 559, + 719 + ], + "type": "interline_equation", + "content": "D = f _ {N} \\circ \\dots \\circ f _ {2} \\circ f _ {1} (Q) \\tag {1}", + "image_path": "d542cd58bb3396b896457f99ce48009a442553bf91f1c366bd9dea3388cd6f2c.jpg" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "text", + "content": "Synergizing RAG and Reasoning: A Systematic Review" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 399, + 47, + 558, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 47, + 558, + 57 + ], + "spans": [ + { + "bbox": [ + 399, + 47, + 558, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 70, + 558, + 264 + ], + 
"blocks": [ + { + "bbox": [ + 52, + 70, + 558, + 264 + ], + "lines": [ + { + "bbox": [ + 52, + 70, + 558, + 264 + ], + "spans": [ + { + "bbox": [ + 52, + 70, + 558, + 264 + ], + "type": "image", + "image_path": "044160498b6a6dec3d4b731753eb83312cd53fabf064ad53ce6793173d12947b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 181, + 274, + 428, + 288 + ], + "lines": [ + { + "bbox": [ + 181, + 274, + 428, + 288 + ], + "spans": [ + { + "bbox": [ + 181, + 274, + 428, + 288 + ], + "type": "text", + "content": "Figure 5. Patterns of Synergy between RAG and Reasoning" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "spans": [ + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "text", + "content": "where each " + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "inline_equation", + "content": "f_{i}\\in \\{\\Psi ,R,\\Gamma \\}" + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "text", + "content": " denotes strictly defined functions for reasoning " + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "inline_equation", + "content": "(\\Psi)" + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "text", + "content": ", retrieval " + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "inline_equation", + "content": "(R)" + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "text", + "content": ", or decision-making " + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "inline_equation", + "content": "(\\Gamma)" + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "text", + "content": ", with " + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "inline_equation", + "content": "\\circ" + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": 
"text", + "content": " representing function composition. This formulation adheres to the fixed mapping sequence " + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "inline_equation", + "content": "Q\\mapsto \\Psi (Q)\\mapsto R(\\Psi (Q))\\mapsto \\Gamma (R(\\Psi (Q)))" + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "text", + "content": ", exhibiting Markovian properties where " + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "inline_equation", + "content": "f_{t + 1}" + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "text", + "content": " depends solely on " + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "inline_equation", + "content": "f_{t}" + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "text", + "content": "'s output while remaining independent of historical states " + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "inline_equation", + "content": "\\{f_{< t}\\}" + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "text", + "content": ". The chained composition guarantees process closure and reproducibility, though constrained by the static combinatorial nature of " + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "inline_equation", + "content": "\\{f_i\\}_{i = 1}^N" + }, + { + "bbox": [ + 50, + 303, + 295, + 411 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 411, + 295, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 411, + 295, + 446 + ], + "spans": [ + { + "bbox": [ + 50, + 411, + 295, + 446 + ], + "type": "text", + "content": "In the pre-defined pipeline, based on the position where reasoning is introduced, it can be further divided into Pre-Retrieval, Post-Retrieval, and Hybrid." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 453, + 295, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 453, + 295, + 477 + ], + "spans": [ + { + "bbox": [ + 50, + 453, + 295, + 477 + ], + "type": "text", + "content": "4.1.1 Pre-Retrieval Reasoning. For pre-retrieval methods, the sequence is explicitly defined as" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 134, + 484, + 294, + 496 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 134, + 484, + 294, + 496 + ], + "spans": [ + { + "bbox": [ + 134, + 484, + 294, + 496 + ], + "type": "interline_equation", + "content": "D = \\Gamma \\circ \\mathcal {R} \\circ \\Psi (Q) \\tag {2}", + "image_path": "026818ed8e9d2019b420881aa9487005b319ec64912505ff456df2d7da6e3b61.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 502, + 295, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 502, + 295, + 574 + ], + "spans": [ + { + "bbox": [ + 50, + 502, + 295, + 574 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 502, + 295, + 574 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 50, + 502, + 295, + 574 + ], + "type": "text", + "content": " denotes a reasoning operator that systematically transforms or enriches the query prior to retrieval. This paradigm enhances retrieval precision by resolving ambiguities, inferring implicit intents, or optimizing query representations. 
Current research identifies four principal methodological categories for designing " + }, + { + "bbox": [ + 50, + 502, + 295, + 574 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 50, + 502, + 295, + 574 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 574, + 295, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 574, + 295, + 682 + ], + "spans": [ + { + "bbox": [ + 50, + 574, + 295, + 682 + ], + "type": "text", + "content": "Query Optimization focuses on generating and selecting query variants to maximize retrieval relevance. Mathematically, this is formalized as Candidates " + }, + { + "bbox": [ + 50, + 574, + 295, + 682 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 50, + 574, + 295, + 682 + ], + "type": "text", + "content": " Generate(Q,C), " + }, + { + "bbox": [ + 50, + 574, + 295, + 682 + ], + "type": "inline_equation", + "content": "\\Psi_{\\mathrm{Optimize}}(Q,C) = \\arg \\max_{\\mathrm{candidate} \\in \\mathrm{Candidates}}" + }, + { + "bbox": [ + 50, + 574, + 295, + 682 + ], + "type": "text", + "content": " Score(candidate), where (Generate) produces candidate queries and (arg max) selects optimal variants based on contrastive training or reinforcement learning. Representative implementations, such as LeReT [34], leverage iterative sampling and optimization to balance query diversity and specificity." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 682, + 295, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 682, + 295, + 719 + ], + "spans": [ + { + "bbox": [ + 51, + 682, + 295, + 719 + ], + "type": "text", + "content": "Attribute Judgment employs classification mechanisms to dynamically regulate retrieval triggers. 
This is modeled as " + }, + { + "bbox": [ + 51, + 682, + 295, + 719 + ], + "type": "inline_equation", + "content": "\\Psi_{\\mathrm{Classify}}(Q) = \\mathrm{Classify}(Q)" + }, + { + "bbox": [ + 51, + 682, + 295, + 719 + ], + "type": "text", + "content": ", where Classify evaluates query" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 303, + 562, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 303, + 562, + 350 + ], + "spans": [ + { + "bbox": [ + 313, + 303, + 562, + 350 + ], + "type": "text", + "content": "attributes (e.g., temporal sensitivity, intent complexity) against predefined criteria. Frameworks like UAR [14] and AdaptiveRAG [41] exemplify this approach by integrating multistage classifiers to minimize unnecessary retrievals." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 351, + 560, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 351, + 560, + 422 + ], + "spans": [ + { + "bbox": [ + 313, + 351, + 560, + 422 + ], + "type": "text", + "content": "Plan Generation decomposes complex queries into structured sub-task sequences to guide retrieval direction. Formulated as " + }, + { + "bbox": [ + 313, + 351, + 560, + 422 + ], + "type": "inline_equation", + "content": "\\Psi_{\\mathrm{Plan}}(Q) = \\mathrm{Plan}(Q)" + }, + { + "bbox": [ + 313, + 351, + 560, + 422 + ], + "type": "text", + "content": ", the operator Plan generates hierarchical task decompositions, as seen in PlanRAG [48], which utilizes chain-of-thought reasoning to align retrieval targets with multi-step problem-solving requirements." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 423, + 559, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 423, + 559, + 494 + ], + "spans": [ + { + "bbox": [ + 313, + 423, + 559, + 494 + ], + "type": "text", + "content": "Semantic Enhancement enriches query representations using domain-specific or task-aware embeddings. 
Expressed as " + }, + { + "bbox": [ + 313, + 423, + 559, + 494 + ], + "type": "inline_equation", + "content": "\\Psi_{\\text{Enhance}}(Q) = \\text{Encode}(Q, \\mathcal{K})" + }, + { + "bbox": [ + 313, + 423, + 559, + 494 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 423, + 559, + 494 + ], + "type": "inline_equation", + "content": "\\mathcal{K}" + }, + { + "bbox": [ + 313, + 423, + 559, + 494 + ], + "type": "text", + "content": " denotes auxiliary knowledge (e.g., reasoning trajectories), methods like O1-Embedder [101] integrate latent reasoning patterns into query embeddings to improve retrieval robustness." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 495, + 559, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 495, + 559, + 553 + ], + "spans": [ + { + "bbox": [ + 313, + 495, + 559, + 553 + ], + "type": "text", + "content": "Collectively, these methodologies demonstrate that pre-retrieval reasoning serves as a systematic interface to mitigate semantic gaps between raw queries and knowledge bases, establishing a critical component for precision-driven RAG architectures." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 564, + 560, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 564, + 560, + 685 + ], + "spans": [ + { + "bbox": [ + 313, + 564, + 560, + 685 + ], + "type": "text", + "content": "4.1.2 Post-Retrieval Reasoning. In pre-defined RAG systems with multi-step reasoning pipelines, the post-retrieval reasoning paradigm represents a critical advancement where cognitive processing occurs after information retrieval from external sources. This approach addresses inherent limitations in conventional RAG, particularly in managing knowledge conflicts, mitigating information insufficiency, and enhancing logical consistency across complex reasoning tasks. 
Mathematically, this process can be formalized as a deterministic function composition:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 399, + 706, + 558, + 718 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 706, + 558, + 718 + ], + "spans": [ + { + "bbox": [ + 399, + 706, + 558, + 718 + ], + "type": "interline_equation", + "content": "D = \\Gamma \\circ \\Psi \\circ \\mathcal {R} (Q) \\tag {3}", + "image_path": "11eeb8f217c0603397468d6f575850696bfced99e1ffb539266f4c9bacb0de5f.jpg" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "spans": [ + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "type": "text", + "content": "Gao et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 72, + 294, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 72, + 294, + 95 + ], + "spans": [ + { + "bbox": [ + 51, + 72, + 294, + 95 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 51, + 72, + 294, + 95 + ], + "type": "text", + "content": " denotes the retrieval operator, " + }, + { + "bbox": [ + 51, + 72, + 294, + 95 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 51, + 72, + 294, + 95 + ], + "type": "text", + "content": " implements the reasoning transformation, and " + }, + { + "bbox": [ + 51, + 72, + 294, + 95 + ], + "type": "inline_equation", + "content": "\\Gamma" + }, + { + "bbox": [ + 51, + 72, + 294, + 95 + ], + "type": "text", + "content": " represents the final decision function." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 96, + 295, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 96, + 295, + 300 + ], + "spans": [ + { + "bbox": [ + 50, + 96, + 295, + 300 + ], + "type": "text", + "content": "The core characteristic of Post-Retrieval Reasoning lies in its execution of the reasoning process after retrieval, with the reasoning target being the retrieved content. ToG2.0 [60] proposes an iterative multi-step reasoning framework that alternates between graph retrieval and context retrieval, integrating the reasoning judgment of LLMs to progressively expand entities and prune irrelevant information, ultimately generating accurate answers. This approach dynamically addresses the issue of insufficient information through iterative refinement while establishing a dual-evidence verification mechanism via knowledge graph relation pruning and entity-guided context retrieval. 
Its graph-structured reasoning module transforms the connectivity validation of triple paths into a constraint satisfaction problem, effectively mitigating logical inconsistencies between text fragments and thereby significantly improving the quality of complex question answering." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 300, + 295, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 300, + 295, + 515 + ], + "spans": [ + { + "bbox": [ + 50, + 300, + 295, + 515 + ], + "type": "text", + "content": "ActiveRAG [100], on the other hand, employs a predefined three-stage process (Self-Inquiry " + }, + { + "bbox": [ + 50, + 300, + 295, + 515 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 50, + 300, + 295, + 515 + ], + "type": "text", + "content": " Knowledge Assimilation " + }, + { + "bbox": [ + 50, + 300, + 295, + 515 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 50, + 300, + 295, + 515 + ], + "type": "text", + "content": " Thought Accommodation) to structurally comprehend and calibrate retrieved knowledge, resolving conflicts between parametric memory and external knowledge. During the Knowledge Assimilation stage, ActiveRAG enhances the corrective effect of external knowledge on the internal representations of LLMs through multi-instruction fine-tuning strategies (e.g., counterfactual comparison and anchor association), substantially reducing the likelihood of hallucination generation. ARM's [7] structural alignment and self-verification stages also demonstrate optimization for post-retrieval reasoning. By incorporating domain knowledge via mixed-integer programming (MIP) solvers, ARM ensures the rationality and coverage of retrieval results, providing a scalable optimization framework for multi-source data compatibility and thereby enabling globally optimal cross-modal retrieval." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 521, + 303, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 521, + 303, + 617 + ], + "spans": [ + { + "bbox": [ + 50, + 521, + 303, + 617 + ], + "type": "text", + "content": "4.1.3 Hybrid Reasoning. The Hybrid pattern of pre-defined process forms a composite processing paradigm by integrating pre-retrieval reasoning with post-retrieval reasoning. The essence is formalized as a multi-round recursive iterative process, where each iteration cycle strictly comprises three phases: Retrieval, Generation, and Reasoning, executed as structured composite operations. Let the total number of iterations be " + }, + { + "bbox": [ + 50, + 521, + 303, + 617 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 50, + 521, + 303, + 617 + ], + "type": "text", + "content": "; the workflow is defined as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 110, + 625, + 295, + 645 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 625, + 295, + 645 + ], + "spans": [ + { + "bbox": [ + 110, + 625, + 295, + 645 + ], + "type": "interline_equation", + "content": "Q _ {T} = \\left(\\bigcirc_ {t = 1} ^ {T} \\mathcal {R} _ {\\square} \\circ \\Gamma_ {t} \\circ \\Psi_ {t}\\right) \\left(Q _ {0}\\right) \\tag {4}", + "image_path": "8b88426292f7db739f94523b48b34208c47731f678bdd165bde3f6c09889658d.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 647, + 296, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 647, + 296, + 718 + ], + "spans": [ + { + "bbox": [ + 50, + 647, + 296, + 718 + ], + "type": "text", + "content": "Here, each iterative unit is indexed by " + }, + { + "bbox": [ + 50, + 647, + 296, + 718 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 50, + 647, + 296, + 718 + ], + "type": "text", + "content": ". 
The process terminates when a predefined condition " + }, + { + "bbox": [ + 50, + 647, + 296, + 718 + ], + "type": "inline_equation", + "content": "\\mathcal{T}(Q_t, D_t, C_t)" + }, + { + "bbox": [ + 50, + 647, + 296, + 718 + ], + "type": "text", + "content": " is met, yielding the final response " + }, + { + "bbox": [ + 50, + 647, + 296, + 718 + ], + "type": "inline_equation", + "content": "\\Gamma_{\\mathrm{final}}(C_T)" + }, + { + "bbox": [ + 50, + 647, + 296, + 718 + ], + "type": "text", + "content": ". This recursive mechanism enables dynamic synergy between knowledge acquisition and semantic inference, overcoming the linear limitations of single-cycle retrieval-generation frameworks." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 72, + 559, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 559, + 228 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 559, + 228 + ], + "type": "text", + "content": "IR-CoT [78] leverages chain-of-thought reasoning to iteratively construct intermediate logic chains, enabling multi-hop retrieval guided by progressively refined contextual cues. FinSearch [50] introduces a dual-phase architecture that first generates structured search graphs to model temporal and entity dependencies, followed by dynamic query rewriting to optimize financial data retrieval. LevelRAG employs hierarchical validation mechanisms, aggregating multi-granular retrieval results and triggering supplementary retrievals based on context completeness assessments. ITER-RETGEN [68] utilizes generation-enhanced feedback loops to iteratively refine query representations, enhancing semantic alignment between retrieval and generation phases." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 228, + 570, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 228, + 570, + 360 + ], + "spans": [ + { + "bbox": [ + 313, + 228, + 570, + 360 + ], + "type": "text", + "content": "These approaches share a common foundation in structured recursion while diverging in operational mechanisms. By enforcing deterministic iteration cycles, they balance controlled workflow execution with adaptive semantic exploration, addressing challenges such as multi-step reasoning, temporal coherence, and cross-domain knowledge synthesis. The hybrid paradigm's strength lies in its capacity to decompose complex queries into iterative retrieval-generation units, systematically bridging knowledge gaps while maintaining interpretability and robustness in open-domain problem-solving scenarios." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 380, + 454, + 392 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 380, + 454, + 392 + ], + "spans": [ + { + "bbox": [ + 314, + 380, + 454, + 392 + ], + "type": "text", + "content": "4.2 Dynamic RAG Workflow" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 396, + 559, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 396, + 559, + 586 + ], + "spans": [ + { + "bbox": [ + 313, + 396, + 559, + 586 + ], + "type": "text", + "content": "The RAG with dynamic workflow represents an autonomous reasoning architecture centered around LLMs, characterized by the integration of non-deterministic operational workflows and real-time decision-making capabilities. Unlike predefined pipelines, this architecture enables continuous monitoring of reasoning states to dynamically trigger retrieval, generation, or verification operations. 
The LLM actively evaluates contextual demands during reasoning processes, autonomously determining optimal moments for invoking external tools or resources through a hybrid feedback coordination mechanism. By eliminating fixed iterative units and pre-determined tool-calling sequences, the framework achieves dynamic evolution of execution pathways, demonstrating superior adaptability in complex cognitive tasks through real-time adjustment of computational workflows based on intermediate reasoning outcomes." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 586, + 559, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 586, + 559, + 718 + ], + "spans": [ + { + "bbox": [ + 313, + 586, + 559, + 718 + ], + "type": "text", + "content": "This dynamic architecture manifests three principal characteristics: 1) Operator invocation is governed by the LLM's contextual state analysis, exemplified through special token prediction (e.g., '[Web-Search]' or `') to initiate external operations; 2) Reasoning trajectories exhibit high flexibility, allowing dynamic query reformulation and sub-problem generation to overcome limitations of static workflows; 3) Context-driven decision mechanisms prioritize real-time reasoning states over predefined rules, enhancing systemic responsiveness to emergent task complexities while improving precision." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "text", + "content": "Synergizing RAG and Reasoning: A Systematic Review" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 399, + 47, + 558, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 47, + 558, + 57 + ], + "spans": [ + { + "bbox": [ + 399, + 47, + 558, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 72, + 295, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 72, + 295, + 121 + ], + "spans": [ + { + "bbox": [ + 50, + 72, + 295, + 121 + ], + "type": "text", + "content": "Defining the reasoning state at time " + }, + { + "bbox": [ + 50, + 72, + 295, + 121 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 50, + 72, + 295, + 121 + ], + "type": "text", + "content": " as " + }, + { + "bbox": [ + 50, + 72, + 295, + 121 + ], + "type": "inline_equation", + "content": "S_{t} = (H_{t}, C_{t})" + }, + { + "bbox": [ + 50, + 72, + 295, + 121 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 50, + 72, + 295, + 121 + ], + "type": "inline_equation", + "content": "H_{t}" + }, + { + "bbox": [ + 50, + 72, + 295, + 121 + ], + "type": "text", + "content": " denotes historical information aggregation and " + }, + { + "bbox": [ + 50, + 72, + 295, + 121 + ], + "type": "inline_equation", + "content": "C_{t}" + }, + { + "bbox": [ + 50, + 72, + 295, + 121 + ], + "type": "text", + "content": " represents contextual embedding vectors, the decision process is modeled as a stochastic system:" + } + ] + } + ], + "index": 2 + }, + { + 
"bbox": [ + 140, + 135, + 295, + 148 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 135, + 295, + 148 + ], + "spans": [ + { + "bbox": [ + 140, + 135, + 295, + 148 + ], + "type": "interline_equation", + "content": "a _ {t + 1} \\sim \\pi \\left(S _ {t}; \\Theta\\right) \\tag {5}", + "image_path": "f1d1f0959136d71cea86d5be0b8740d2d2d379b3fc2093488ed9cf7e2243fbb0.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 127, + 167, + 295, + 180 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 167, + 295, + 180 + ], + "spans": [ + { + "bbox": [ + 127, + 167, + 295, + 180 + ], + "type": "interline_equation", + "content": "S _ {t + 1} = \\delta \\left(S _ {t}, \\mathcal {T} _ {a _ {t + 1}} \\left(S _ {t}\\right)\\right) \\tag {6}", + "image_path": "260a7df8affafd639348b6224f0dabe59b9ebb2d9c5673bcb9348e540aa9a196.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "spans": [ + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "inline_equation", + "content": "\\pi : S \\to \\Delta(\\mathcal{A})" + }, + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "text", + "content": " constitutes the policy function mapping states to probability distributions over action space " + }, + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "text", + "content": " (retrieval, generation, verification, etc.), while " + }, + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_a" + }, + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "text", + "content": " denotes state transition functions corresponding to action " + 
}, + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "text", + "content": ". The non-Markovian nature of the system emerges from " + }, + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "inline_equation", + "content": "S_{t+1}" + }, + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "text", + "content": "'s dependence on complete historical trajectories " + }, + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "inline_equation", + "content": "\\{S_{\\leq t}\\}" + }, + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "text", + "content": ", with dynamic adaptability ensured through extensible action spaces " + }, + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "text", + "content": " and online optimization of policy parameters " + }, + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "inline_equation", + "content": "\\Theta" + }, + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "text", + "content": ". This formulation enables context-sensitive state updates via " + }, + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "inline_equation", + "content": "\\delta : S \\times \\mathcal{O} \\to S" + }, + { + "bbox": [ + 50, + 184, + 295, + 316 + ], + "type": "text", + "content": ", establishing a theoretical foundation for open-ended reasoning processes in complex problem domains." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 316, + 295, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 316, + 295, + 627 + ], + "spans": [ + { + "bbox": [ + 50, + 316, + 295, + 627 + ], + "type": "text", + "content": "Based on the mode of reasoning initiation, agentic RAG with dynamic workflows can be further categorized into three distinct types: Proactivity-driven, Reflection-driven, and Feedback-driven mechanisms. The LLM proactivity-driven approach is characterized by the model's autonomous triggering of actions based on internal assessments, executing operations without external intervention through mechanisms analogous to human intuitive decision-making—for instance, when the model independently identifies insufficient evidentiary support in the current reasoning process, it proactively generates retrieval requests to supplement information. The reflection-driven mode emphasizes self-examination of the reasoning process, dynamically initiating subsequent operations through quantitative evaluation of intermediate result quality (e.g., triggering actions when the calculated reasoning support score of 0.7 exceeds a predefined threshold of 0.6), which simulates the self-optimization logic of expert systems, enabling the model to adjust reasoning pathways through introspection. The feedback-driven mechanism incorporates external intervention, employing independent models or rule-based systems to perform real-time scoring of intermediate states (e.g., an external reward model assigning a 2.5/5 score to reasoning steps) while providing corrective suggestions, operating similarly to a mentor-guided mode that continuously calibrates the reasoning workflow through external feedback signals." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 634, + 295, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 634, + 295, + 718 + ], + "spans": [ + { + "bbox": [ + 50, + 634, + 295, + 718 + ], + "type": "text", + "content": "4.2.1 Proactivity-Driven Reasoning. The core innovation of Proactivity-driven Reasoning lies in enabling LLMs to fully govern the reasoning process through self-triggered prediction mechanisms. This active control manifests through three key mechanisms: (1) direct tool invocation via model-generated special tokens (e.g., [Web-Search]), without external intervention, (2) context-aware decision making based" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 72, + 559, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 559, + 108 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 559, + 108 + ], + "type": "text", + "content": "on real-time knowledge gaps or hypothesis verification requirements, and (3) Markov Decision Process (MDP)-based dynamic path optimization." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 108, + 559, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 108, + 559, + 228 + ], + "spans": [ + { + "bbox": [ + 313, + 108, + 559, + 228 + ], + "type": "text", + "content": "Formally, the reasoning process can be modeled as a state sequence " + }, + { + "bbox": [ + 313, + 108, + 559, + 228 + ], + "type": "inline_equation", + "content": "S = \\{s_0, s_1, \\ldots, s_t\\}" + }, + { + "bbox": [ + 313, + 108, + 559, + 228 + ], + "type": "text", + "content": ", where each state " + }, + { + "bbox": [ + 313, + 108, + 559, + 228 + ], + "type": "inline_equation", + "content": "s_t" + }, + { + "bbox": [ + 313, + 108, + 559, + 228 + ], + "type": "text", + "content": " encapsulates the current reasoning context. 
At each step " + }, + { + "bbox": [ + 313, + 108, + 559, + 228 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 108, + 559, + 228 + ], + "type": "text", + "content": ", the LLM selects an action " + }, + { + "bbox": [ + 313, + 108, + 559, + 228 + ], + "type": "inline_equation", + "content": "a_t \\in \\{\\text{retrieve, generate, terminate}\\}" + }, + { + "bbox": [ + 313, + 108, + 559, + 228 + ], + "type": "text", + "content": " based on " + }, + { + "bbox": [ + 313, + 108, + 559, + 228 + ], + "type": "inline_equation", + "content": "s_t" + }, + { + "bbox": [ + 313, + 108, + 559, + 228 + ], + "type": "text", + "content": ", executes the corresponding operation (e.g., document retrieval or answer generation), and updates its state through transition function " + }, + { + "bbox": [ + 313, + 108, + 559, + 228 + ], + "type": "inline_equation", + "content": "s_{t+1} = \\delta(s_t, a_t, o_t)" + }, + { + "bbox": [ + 313, + 108, + 559, + 228 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 313, + 108, + 559, + 228 + ], + "type": "inline_equation", + "content": "o_t" + }, + { + "bbox": [ + 313, + 108, + 559, + 228 + ], + "type": "text", + "content": " represents action outcomes. This MDP framework enables dynamic path adjustment through real-time feedback until termination (" + }, + { + "bbox": [ + 313, + 108, + 559, + 228 + ], + "type": "inline_equation", + "content": "a_T = \\text{terminate}" + }, + { + "bbox": [ + 313, + 108, + 559, + 228 + ], + "type": "text", + "content": ") and final answer generation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 228, + 569, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 228, + 569, + 372 + ], + "spans": [ + { + "bbox": [ + 313, + 228, + 569, + 372 + ], + "type": "text", + "content": "Recent advancements demonstrate significant improvements over conventional RAG approaches. 
The Agentic Reasoning framework achieves granular control through dynamic tool invocation, eliminating predefined execution sequences. DeepRAG [24] optimizes cost-accuracy tradeoffs via MDP-based imitation learning, addressing the retrieval-generation disconnection in traditional systems. CoRAG [83] introduces hybrid-driven mechanisms combining LLM-initiated subqueries with external policy control, enhancing error tolerance for complex queries. Collectively, these approaches establish a paradigm shift from fixed pipelines to context-sensitive, self-optimizing reasoning architectures." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 377, + 564, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 377, + 564, + 521 + ], + "spans": [ + { + "bbox": [ + 313, + 377, + 564, + 521 + ], + "type": "text", + "content": "4.2.2 Reflection-Driven Reasoning. The reflection-driven mechanism represents a dynamic reasoning framework that enables iterative self-evaluation and revision of intermediate outputs through model introspection. Common methods include: (1) a evaluation system combining explicit token prediction and implicit confidence scoring, (2) self-monitoring capabilities through grounding tokens for content-document consistency verification and utility tokens for answer effectiveness assessment, and (3) adaptive routing mechanisms that automatically select single-hop or multi-hop reasoning paths based on contextual complexity. 
The mathematical formalism of this process can be expressed as:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 344, + 529, + 559, + 562 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 344, + 529, + 559, + 562 + ], + "spans": [ + { + "bbox": [ + 344, + 529, + 559, + 562 + ], + "type": "interline_equation", + "content": "\\mathcal {P} = \\bigcup_ {t = 1} ^ {T} \\left[ G \\left(\\mathbf {C} _ {t}\\right)\\rightarrow E \\left(\\mathbf {H} _ {t}, \\mathcal {D}\\right)\\rightarrow \\psi \\left(\\phi \\left(\\mathbf {e} _ {t}\\right), \\tau\\right)\\right] \\tag {7}", + "image_path": "6040981e5ae6dc2acb3aecd45dada104998a9d10d8cf8028b4baf25c8047693b.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 563, + 559, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 563, + 559, + 622 + ], + "spans": [ + { + "bbox": [ + 313, + 563, + 559, + 622 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 563, + 559, + 622 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 313, + 563, + 559, + 622 + ], + "type": "text", + "content": " denotes the generation function operating on current context " + }, + { + "bbox": [ + 313, + 563, + 559, + 622 + ], + "type": "inline_equation", + "content": "\\mathbf{c}_t" + }, + { + "bbox": [ + 313, + 563, + 559, + 622 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 563, + 559, + 622 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 313, + 563, + 559, + 622 + ], + "type": "text", + "content": " represents the evaluation function that assesses hidden states " + }, + { + "bbox": [ + 313, + 563, + 559, + 622 + ], + "type": "inline_equation", + "content": "\\mathbf{h}_t" + }, + { + "bbox": [ + 313, + 563, + 559, + 622 + ], + "type": "text", + "content": " against external knowledge base " + }, + { + "bbox": [ + 313, + 563, + 559, + 622 + ], + "type": "inline_equation", + "content": 
"\\mathcal{D}" + }, + { + "bbox": [ + 313, + 563, + 559, + 622 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 563, + 559, + 622 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 313, + 563, + 559, + 622 + ], + "type": "text", + "content": " serves as the confidence mapping function, " + }, + { + "bbox": [ + 313, + 563, + 559, + 622 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 313, + 563, + 559, + 622 + ], + "type": "text", + "content": " is the decision threshold, and " + }, + { + "bbox": [ + 313, + 563, + 559, + 622 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 313, + 563, + 559, + 622 + ], + "type": "text", + "content": " functions as the branch selector." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 622, + 559, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 622, + 559, + 718 + ], + "spans": [ + { + "bbox": [ + 313, + 622, + 559, + 718 + ], + "type": "text", + "content": "In practical implementations like Self-RAG [3], this framework generates candidate responses alongside reflection tokens, computes passage relevance scores (ISREL " + }, + { + "bbox": [ + 313, + 622, + 559, + 718 + ], + "type": "inline_equation", + "content": "\\in" + }, + { + "bbox": [ + 313, + 622, + 559, + 718 + ], + "type": "text", + "content": " [0,1]) and factual support metrics (ISSUP), and employs weighted aggregation of token probabilities in " + }, + { + "bbox": [ + 313, + 622, + 559, + 718 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 313, + 622, + 559, + 718 + ], + "type": "text", + "content": " to determine retrieval activation or generation revision through threshold-based " + }, + { + "bbox": [ + 313, + 622, + 559, + 718 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 313, + 622, + 559, + 718 + ], + "type": "text", + "content": " operations. 
Meanwhile, Open-RAG [38] incorporates hybrid threshold mechanisms and Mixture-of-Experts architecture" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "spans": [ + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "type": "text", + "content": "Gao et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 72, + 295, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 72, + 295, + 179 + ], + "spans": [ + { + "bbox": [ + 50, + 72, + 295, + 179 + ], + "type": "text", + "content": "to enforce counterfactual verification through non-retrieval confidence scoring " + }, + { + "bbox": [ + 50, + 72, + 295, + 179 + ], + "type": "inline_equation", + "content": "(\\mathrm{Pr}_{\\mathrm{NoRT}})" + }, + { + "bbox": [ + 50, + 72, + 295, + 179 + ], + "type": "text", + "content": ", enabling dynamic expansion of complex reasoning capabilities while preserving base model efficiency. ReaRAG [49] utilizes knowledge-guided reasoning chains combined with external knowledge sources to perform reflection-driven reasoning. In each iteration, it adjusts the reasoning path through the \"Thought-Action-Observation\" paradigm, effectively preventing error propagation and improving answer accuracy." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 180, + 295, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 180, + 295, + 288 + ], + "spans": [ + { + "bbox": [ + 50, + 180, + 295, + 288 + ], + "type": "text", + "content": "The paradigm's innovation lies in reconstructing traditional sequential processes into conditional Markov decision processes, where state transition probabilities " + }, + { + "bbox": [ + 50, + 180, + 295, + 288 + ], + "type": "inline_equation", + "content": "P(s_{t + 1}|s_t)" + }, + { + "bbox": [ + 50, + 180, + 295, + 288 + ], + "type": "text", + "content": " are dynamically determined by model self-evaluation outcomes. Compared to proactive LLM-driven methods (e.g., Toolformer's direct API invocation), the reflection-driven approach establishes closed-loop control through explicit evaluation stages (function " + }, + { + "bbox": [ + 50, + 180, + 295, + 288 + ], + "type": "inline_equation", + "content": "E" + }, + { + "bbox": [ + 50, + 180, + 295, + 288 + ], + "type": "text", + "content": "), effectively mitigating hallucination risks while maintaining computational efficiency." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "spans": [ + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "text", + "content": "4.2.3 Feedback-Driven Reasoning. The feedback-driven dynamic RAG system establishes closed-loop control over reasoning processes through external signals, formally modeled as a Partially Observable Markov Decision Process. 
The system state " + }, + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "inline_equation", + "content": "s_t = (q_t, \\mathcal{K}_t, H_t)" + }, + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "text", + "content": " evolves through iterative interactions, comprising the current query representation " + }, + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "inline_equation", + "content": "q_t" + }, + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "text", + "content": ", dynamic knowledge base " + }, + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_t" + }, + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "text", + "content": ", and historical trajectory " + }, + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "inline_equation", + "content": "\\mathcal{H}_t" + }, + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "text", + "content": ". Initialized with " + }, + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "inline_equation", + "content": "q_0" + }, + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_0 = \\emptyset" + }, + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "text", + "content": ", the policy function " + }, + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "inline_equation", + "content": "\\pi(a_t | s_t)" + }, + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "text", + "content": " generates actions from the operational space " + }, + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "inline_equation", + "content": "\\mathcal{A} = \\{\\text{Retrieive}, \\text{Reason}, \\text{Verify}, \\text{Answer}, \\emptyset\\}" + }, + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "text", + "content": ". 
State transitions follow " + }, + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "inline_equation", + "content": "s_{t+1} = \\delta(s_t, a_t)" + }, + { + "bbox": [ + 50, + 293, + 295, + 426 + ], + "type": "text", + "content": " with knowledge base updates" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 83, + 429, + 294, + 442 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 429, + 294, + 442 + ], + "spans": [ + { + "bbox": [ + 83, + 429, + 294, + 442 + ], + "type": "interline_equation", + "content": "\\mathcal {K} _ {t + 1} = \\mathcal {K} _ {t} \\oplus \\operatorname {R e t r i e v e} \\left(q _ {t}\\right) \\cdot \\mathbb {I} \\left(a _ {t} = \\text {R e t r i e v e}\\right) \\tag {8}", + "image_path": "3ea9125fe5a9cbbb68a717cc756f0dcb1188de6dd79cef3d1bd466d0563c9e11.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 445, + 295, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 445, + 295, + 482 + ], + "spans": [ + { + "bbox": [ + 50, + 445, + 295, + 482 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 445, + 295, + 482 + ], + "type": "inline_equation", + "content": "\\oplus" + }, + { + "bbox": [ + 50, + 445, + 295, + 482 + ], + "type": "text", + "content": " denotes incremental updates and " + }, + { + "bbox": [ + 50, + 445, + 295, + 482 + ], + "type": "inline_equation", + "content": "\\mathbb{I}" + }, + { + "bbox": [ + 50, + 445, + 295, + 482 + ], + "type": "text", + "content": " represents an indicator function. 
The reward function " + }, + { + "bbox": [ + 50, + 445, + 295, + 482 + ], + "type": "inline_equation", + "content": "R(s_{t},a_{t},s_{t + 1})\\to r_{t}" + }, + { + "bbox": [ + 50, + 445, + 295, + 482 + ], + "type": "text", + "content": " drives policy optimization through" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 97, + 486, + 294, + 498 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 486, + 294, + 498 + ], + "spans": [ + { + "bbox": [ + 97, + 486, + 294, + 498 + ], + "type": "interline_equation", + "content": "\\pi_ {t + 1} = \\Omega \\left(\\pi_ {t}, \\nabla_ {\\theta} \\mathbb {E} _ {a \\sim \\pi_ {t}} \\left[ R \\left(s _ {t}, a, s _ {t + 1}\\right) \\right]\\right) \\tag {9}", + "image_path": "50f75c55705e9ac547c3495a6dc4aa6204a63d2547e17f7bca84b3e970ddf703.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 502, + 295, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 502, + 295, + 525 + ], + "spans": [ + { + "bbox": [ + 50, + 502, + 295, + 525 + ], + "type": "text", + "content": "forming an adaptive control loop. Three distinct feedback mechanisms emerge within this framework." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 526, + 298, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 526, + 298, + 572 + ], + "spans": [ + { + "bbox": [ + 51, + 526, + 298, + 572 + ], + "type": "text", + "content": "Explicit reward feedback employs specialized models " + }, + { + "bbox": [ + 51, + 526, + 298, + 572 + ], + "type": "inline_equation", + "content": "\\pi_{\\mathrm{reward}}" + }, + { + "bbox": [ + 51, + 526, + 298, + 572 + ], + "type": "text", + "content": " for quantitative evaluation, exemplified by RAG-Gym's process rewards [96]. 
The reward function combines immediate and terminal rewards:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 93, + 576, + 294, + 591 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 576, + 294, + 591 + ], + "spans": [ + { + "bbox": [ + 93, + 576, + 294, + 591 + ], + "type": "interline_equation", + "content": "r _ {t} = \\lambda_ {1} \\pi_ {\\text {r e w a r d}} \\left(s _ {t}\\right) + \\lambda_ {2} \\mathbb {E} _ {s _ {t + k}} \\left[ \\gamma^ {k} R _ {\\text {t e r m i n a l}} \\right] \\tag {10}", + "image_path": "e3f98099e87667aff955149c5c01090756f51e471b3f9c11051c7cdc075eabb0.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 594, + 295, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 594, + 295, + 618 + ], + "spans": [ + { + "bbox": [ + 50, + 594, + 295, + 618 + ], + "type": "text", + "content": "with discount factor " + }, + { + "bbox": [ + 50, + 594, + 295, + 618 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 50, + 594, + 295, + 618 + ], + "type": "text", + "content": ". 
SmartRAG extends this through policy gradient optimization" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 91, + 621, + 294, + 653 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 621, + 294, + 653 + ], + "spans": [ + { + "bbox": [ + 91, + 621, + 294, + 653 + ], + "type": "interline_equation", + "content": "\\nabla_ {\\theta} J (\\theta) = \\mathbb {E} _ {\\tau \\sim \\pi_ {\\theta}} [ \\sum_ {t = 0} ^ {T} \\nabla_ {\\theta} \\log \\pi_ {\\theta} (a _ {t} | s _ {t}) \\hat {A} _ {t} ] \\tag {11}", + "image_path": "b716652fd0755f035de4e2f35034eceb961ed32d5c7dce43c618b2799251bf91.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 50, + 658, + 295, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 658, + 295, + 680 + ], + "spans": [ + { + "bbox": [ + 50, + 658, + 295, + 680 + ], + "type": "text", + "content": "where the advantage function " + }, + { + "bbox": [ + 50, + 658, + 295, + 680 + ], + "type": "inline_equation", + "content": "\\hat{A}_t" + }, + { + "bbox": [ + 50, + 658, + 295, + 680 + ], + "type": "text", + "content": " integrates temporal feedback." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 50, + 682, + 296, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 682, + 296, + 718 + ], + "spans": [ + { + "bbox": [ + 50, + 682, + 296, + 718 + ], + "type": "text", + "content": "Implicit environmental feedback derives from knowledge base validation, as implemented in KBQA-o1's SPARQL verification and SolutionRAG's pruning mechanisms [58]." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 72, + 559, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 559, + 133 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 559, + 133 + ], + "type": "text", + "content": "This feedback is formalized as " + }, + { + "bbox": [ + 314, + 72, + 559, + 133 + ], + "type": "inline_equation", + "content": "r_t = \\mathbb{I}(\\mathcal{K}_t\\models q_0)\\cdot c_{\\mathrm{valid}} - \\mathbb{I}(\\bot \\in \\mathcal{K}_t)\\cdot c_{\\mathrm{invalid}}" + }, + { + "bbox": [ + 314, + 72, + 559, + 133 + ], + "type": "text", + "content": " with validation function " + }, + { + "bbox": [ + 314, + 72, + 559, + 133 + ], + "type": "inline_equation", + "content": "\\mathbb{I}(\\cdot)" + }, + { + "bbox": [ + 314, + 72, + 559, + 133 + ], + "type": "text", + "content": " and penalty coefficients " + }, + { + "bbox": [ + 314, + 72, + 559, + 133 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 314, + 72, + 559, + 133 + ], + "type": "text", + "content": ". ReARTeR [75] introduces threshold-triggered correction: when " + }, + { + "bbox": [ + 314, + 72, + 559, + 133 + ], + "type": "inline_equation", + "content": "r_t < \\tau" + }, + { + "bbox": [ + 314, + 72, + 559, + 133 + ], + "type": "text", + "content": ", it activates refinement loops " + }, + { + "bbox": [ + 314, + 72, + 559, + 133 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_{t + 1} = \\mathrm{PEM}(\\mathcal{K}_t,q_0)\\oplus \\mathrm{Retrieve}(\\mathrm{PRM}(s_t))" + }, + { + "bbox": [ + 314, + 72, + 559, + 133 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 314, + 133, + 558, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 133, + 558, + 168 + ], + "spans": [ + { + "bbox": [ + 314, + 133, + 558, + 168 + ], + "type": "text", + "content": "Structured rule feedback encodes domain knowledge through differentiable scoring functions. 
MCTS-KBQA [97] implements depth-attenuated rewards" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 372, + 178, + 558, + 209 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 372, + 178, + 558, + 209 + ], + "spans": [ + { + "bbox": [ + 372, + 178, + 558, + 209 + ], + "type": "interline_equation", + "content": "r _ {t} = \\frac {1}{1 + \\alpha d _ {t}} \\sum_ {i = 1} ^ {n} \\mathrm {L L M} _ {\\text {s c o r e}} \\left(a _ {t} ^ {(i)}\\right) \\tag {12}", + "image_path": "0bb1f01e1049a5a3fe1d835a65ca2f133ec47d989cefa8141411e914b10cf409.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 314, + 219, + 559, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 219, + 559, + 255 + ], + "spans": [ + { + "bbox": [ + 314, + 219, + 559, + 255 + ], + "type": "text", + "content": "with search depth " + }, + { + "bbox": [ + 314, + 219, + 559, + 255 + ], + "type": "inline_equation", + "content": "d_t" + }, + { + "bbox": [ + 314, + 219, + 559, + 255 + ], + "type": "text", + "content": " and decay coefficient " + }, + { + "bbox": [ + 314, + 219, + 559, + 255 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 314, + 219, + 559, + 255 + ], + "type": "text", + "content": ". CR-Planner's hierarchical critique combines subgoal and execution scores: " + }, + { + "bbox": [ + 314, + 219, + 559, + 255 + ], + "type": "inline_equation", + "content": "r_t^{\\mathrm{total}} = \\beta_1\\pi_{\\mathrm{sub}}(s_t) + \\beta_2\\pi_{\\mathrm{exec}}(a_t|s_t)" + }, + { + "bbox": [ + 314, + 219, + 559, + 255 + ], + "type": "text", + "content": " through weighted fusion." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 255, + 559, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 255, + 559, + 362 + ], + "spans": [ + { + "bbox": [ + 313, + 255, + 559, + 362 + ], + "type": "text", + "content": "These feedback mechanisms interact through a unified strategy update framework, where external feedback-driven approaches achieve controllable optimization of the reasoning process through interpretable feedback signals while maintaining the generative capabilities of LLMs. Overall, the dynamic process of RAG, by endowing the model with autonomy in the reasoning process, not only enhances adaptability to complex tasks but also provides a new solution for efficient reasoning in resource-constrained environments." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 376, + 523, + 389 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 376, + 523, + 389 + ], + "spans": [ + { + "bbox": [ + 315, + 376, + 523, + 389 + ], + "type": "text", + "content": "5 Implementation and Optimization" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 392, + 559, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 392, + 559, + 487 + ], + "spans": [ + { + "bbox": [ + 313, + 392, + 559, + 487 + ], + "type": "text", + "content": "Building upon preceding sections, this section systematically analyzes the concrete implementation and optimization strategies for reasoning within the RAG paradigm. In contrast to existing surveys that predominantly focus on posttraining methodologies or isolated LLM reasoning mechanisms, our analysis maintains a dedicated focus on the synergistic integration of RAG with reasoning examining their co-adaptive implementations through a structural lens." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 315, + 500, + 424, + 511 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 500, + 424, + 511 + ], + "spans": [ + { + "bbox": [ + 315, + 500, + 424, + 511 + ], + "type": "text", + "content": "5.1 Reasoning Process" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 514, + 559, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 514, + 559, + 718 + ], + "spans": [ + { + "bbox": [ + 313, + 514, + 559, + 718 + ], + "type": "text", + "content": "5.1.1 LLM CoT. Integrating Chain-of-Thought (CoT) reasoning with LLMs is key to combining RAG with complex reasoning tasks. Research shows CoT enhances RAG systems by explicitly guiding multi-step reasoning and dynamically incorporating external knowledge. For example, ActiveRAG [100] uses a \"Self-Inquiry " + }, + { + "bbox": [ + 313, + 514, + 559, + 718 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 313, + 514, + 559, + 718 + ], + "type": "text", + "content": " Knowledge Assimilation " + }, + { + "bbox": [ + 313, + 514, + 559, + 718 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 313, + 514, + 559, + 718 + ], + "type": "text", + "content": " Thought Accommodation\" chain to align knowledge and reasoning: a knowledge assimilation agent merges external documents with LLM memory via operations like association and reflection, creating structured knowledge. Meanwhile, a reasoning adaptation agent refines inference chains from Self-Inquiry to ensure answers align with retrieved knowledge and address reasoning gaps. Similarly, Adaptive-RAG [41] alternates between CoT and retrieval, breaking down multi-hop reasoning into steps such as entity localization and document correlation, refining retrieval and generation based on prior results." 
+ } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "text", + "content": "Synergizing RAG and Reasoning: A Systematic Review" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 399, + 47, + 558, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 47, + 558, + 57 + ], + "spans": [ + { + "bbox": [ + 399, + 47, + 558, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 69, + 558, + 210 + ], + "blocks": [ + { + "bbox": [ + 52, + 69, + 558, + 210 + ], + "lines": [ + { + "bbox": [ + 52, + 69, + 558, + 210 + ], + "spans": [ + { + "bbox": [ + 52, + 69, + 558, + 210 + ], + "type": "image", + "image_path": "a797186982b7420dcac71a470f6aca1de11923d2ffcfd02c0fb32375430a9b11.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 52, + 213, + 558, + 329 + ], + "blocks": [ + { + "bbox": [ + 52, + 213, + 558, + 329 + ], + "lines": [ + { + "bbox": [ + 52, + 213, + 558, + 329 + ], + "spans": [ + { + "bbox": [ + 52, + 213, + 558, + 329 + ], + "type": "image", + "image_path": "cc011ed02bfe16c008fa59b27f259cd658bc4f9700c3e26493d504dd622d891f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 123, + 341, + 488, + 354 + ], + "lines": [ + { + "bbox": [ + 123, + 341, + 488, + 354 + ], + "spans": [ + { + "bbox": [ + 123, + 341, + 488, + 354 + ], + "type": "text", + "content": "Figure 6. 
Implementation and optimization of the synergy between RAG and Reasoning" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 369, + 296, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 369, + 296, + 525 + ], + "spans": [ + { + "bbox": [ + 50, + 369, + 296, + 525 + ], + "type": "text", + "content": "At the knowledge and reasoning level, O1-Embedder [101] drives RAG through open-ended long-text reasoning, extending CoT beyond fixed triggers via coherent thought processes like problem decomposition. PlanRAG [48] explicitly uses CoT to produce executable multi-step plans, adjusting operations dynamically through a closed-loop \"plan-execute-feedback\" cycle. Despite different implementations, these methods share two CoT strengths: breaking down complex problems into clear intermediate steps and guiding external knowledge selection through reasoning states. Studies show these approaches outperform traditional RAG in multi-hop QA and knowledge-intensive tasks by enhancing both LLMs' reasoning and adaptability to external knowledge." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 551, + 315, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 551, + 315, + 718 + ], + "spans": [ + { + "bbox": [ + 50, + 551, + 315, + 718 + ], + "type": "text", + "content": "5.1.2 Special Token Prediction. Recent advances active RAG also highlight special token prediction as a key method for dynamically linking external knowledge retrieval with multi-step reasoning [16]. By embedding domain- or action-specific tokens (e.g., '[Web-search]', '[Retrieve=Yes)', `') into LLM vocabularies, models can autonomously trigger tools or self-reflect during text generation. 
Frameworks like Self-RAG [3] and SmartRAG [20] use dedicated tokens ('Retrieve', 'ISREL', '[RETRIEVE]') to manage retrieval activation, relevance checks, and output verification, turning static reasoning chains into conditional workflows. The innovation lies in predicting these tokens within generated sequences, segmenting tasks into retrieval initiation, document evaluation, and knowledge grounding phases." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 369, + 559, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 369, + 559, + 513 + ], + "spans": [ + { + "bbox": [ + 313, + 369, + 559, + 513 + ], + "type": "text", + "content": "Hybrid models such as Open-RAG [38] combine token control with mixture-of-experts (MoE) routing, sparsely activating experts aligned with token-predicted reasoning. Unlike traditional chain-of-thought or search tree methods, special token prediction offers finer control and interpretability by encoding decision logic explicitly in token sequences while maintaining end-to-end training. This approach also overcomes latency and inflexibility of preset retrieval schedules by enabling context-aware, on-demand tool use. For example, R1-Searcher [72] and Search-o1 [51] use token boundaries like `' to coordinate retrieval pauses and resume generation after knowledge integration." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 513, + 559, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 513, + 559, + 573 + ], + "spans": [ + { + "bbox": [ + 313, + 513, + 559, + 573 + ], + "type": "text", + "content": "Together, these systems show that token-level prediction not only bridges reasoning and retrieval but also creates a scalable framework for tool-enhanced language agents, preserving generative fluency while enabling systematic external knowledge integration and procedural reasoning." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 598, + 559, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 598, + 559, + 682 + ], + "spans": [ + { + "bbox": [ + 313, + 598, + 559, + 682 + ], + "type": "text", + "content": "5.1.3 Search-Driven Reasoning. Recent advancements in search-driven reasoning have significantly improved RAG frameworks by employing structured search strategies for dynamic information exploration and multi-step reasoning with external knowledge. Current approaches mainly follow three paradigms: tree-based search, MCTS, and reinforcement learning-optimized policy networks." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 682, + 559, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 682, + 559, + 718 + ], + "spans": [ + { + "bbox": [ + 313, + 682, + 559, + 718 + ], + "type": "text", + "content": "Tree-based methods organize reasoning hierarchically through structured path exploration. For example, StePO-Rec [5] uses a multi-step tree-structured reasoning method" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "spans": [ + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "type": "text", + "content": "Gao et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 72, + 294, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 72, + 294, + 227 + ], + "spans": [ + { + "bbox": [ + 53, + 72, + 294, + 227 + ], + "type": "text", + "content": "that iteratively retrieves different outfit matching knowledge and user preferences at each node, ultimately achieving generative recommendations for complementary items. OmniThink [94] uses an information tree to expand topic analysis by generating subqueries that guide breadth-first or depth-first retrievals. DeepRAG [24] applies a binary tree search within a Markov decision process to explore parametric knowledge and retrieval paths in parallel, selecting optimal branches. DeepSolution's [54] bidirectional thinking tree alternates expanding solution and critique nodes with scoring for path pruning, aligning naturally with MCTS evaluation. These methods balance exploration efficiency with solution coverage through explicit tree structures." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 228, + 294, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 228, + 294, + 395 + ], + "spans": [ + { + "bbox": [ + 53, + 228, + 294, + 395 + ], + "type": "text", + "content": "MCTS enhances robustness by optimizing long-term decisions via simulation, evaluation, and backpropagation. CR-Planner [52] integrates MCTS with the UCB strategy to balance exploration and exploitation while estimating optimal subgoals through multi-step simulations. KBQA-O1 [58] and MCTS-KBQA [97] generate candidate actions using policy models and combine reward models to globally assess logical forms, reducing local optima. 
ReARTeR [75] innovatively merges MCTS with procedural reward models (PRMs), interleaving retrieval and reasoning steps, and filtering high-reward paths to form a closed-loop \"reason-retrieve-reason\" cycle. These methods probabilistically explore paths and use reinforcement learning feedback to improve global reasoning for complex tasks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 396, + 294, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 396, + 294, + 479 + ], + "spans": [ + { + "bbox": [ + 53, + 396, + 294, + 479 + ], + "type": "text", + "content": "Reinforcement learning-optimized policy networks adaptively refine search strategies. LeReT [34] replaces fixed search algorithms with reinforcement learning (e.g., IPO) to dynamically optimize query generation based on rewards like retrieval accuracy, implicitly learning optimal search patterns without explicit tree or graph structures, thus offering greater flexibility and scalability." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 479, + 294, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 479, + 294, + 574 + ], + "spans": [ + { + "bbox": [ + 53, + 479, + 294, + 574 + ], + "type": "text", + "content": "In summary, search-driven reasoning unites inference and retrieval through structured strategies, combining multi-path exploration, dynamic evaluation, and adaptive optimization to deliver interpretable, efficient solutions for knowledge-intensive tasks. Future work may focus on hybrid paradigms (e.g., integrating MCTS and reinforcement learning) and lightweight algorithms to balance performance with computational efficiency." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 599, + 298, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 599, + 298, + 718 + ], + "spans": [ + { + "bbox": [ + 53, + 599, + 298, + 718 + ], + "type": "text", + "content": "5.1.4 Reasoning on Graph. 
Graph-structured reasoning offers a novel approach for multi-hop inference in RAG systems by explicitly modeling knowledge interaction paths through topology. Current methods fall into two categories: query-flow-oriented search graphs (e.g. FinSearch [50]) and knowledge-association-based expansion graphs (ToG-2.0 [60]) FinSearch builds a directed acyclic graph (DAG) where nodes are atomic subqueries (e.g., stock prices, financial reports) and edges capture logical and temporal dependencies. A pre-planner breaks down queries into subquery sequences," + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 317, + 72, + 558, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 72, + 558, + 119 + ], + "spans": [ + { + "bbox": [ + 317, + 72, + 558, + 119 + ], + "type": "text", + "content": "using graph traversal to control information flow and dynamically adjust paths, such as backtracking when conflicts arise—substantially surpassing linear chain-of-thought methods in handling complex logic." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 317, + 133, + 558, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 133, + 558, + 418 + ], + "spans": [ + { + "bbox": [ + 317, + 133, + 558, + 418 + ], + "type": "text", + "content": "5.1.5 External Solver. The integration of RAG and reasoning is also can be achieved by incorporating external solvers, where specialized solvers, such as the Alignment-Oriented LLM-based Retrieval Method (ARM), are employed to handle the reasoning component. The retrieval process for complex problems is formulated as a global optimization task, leveraging external solvers like mixed-integer programming (MIP) to achieve structural alignment and joint optimization of data objects. Specifically, ARM first decomposes user queries into keywords that match N-grams in the dataset through an information alignment module, generating an initial set of retrieval candidates via constrained decoding. 
Subsequently, in the structural alignment phase, the MIP solver performs global filtering on candidate objects based on a predefined objective function that maximizes both the relevance of retrieved objects to the query and their mutual compatibility. This ensures that the selected objects not only cover the requirements of the query but also form a coherent information chain through entity or inter-table linkages. Finally, the self-verification mechanism of the LLM, combined with a beam search-based aggregation strategy, dynamically refines and consolidates multiple candidate sets, ultimately producing a retrieval collection that satisfies both semantic matching and the structural organization of the data." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 317, + 419, + 558, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 419, + 558, + 669 + ], + "spans": [ + { + "bbox": [ + 317, + 419, + 558, + 669 + ], + "type": "text", + "content": "ToG-2.0 achieves multi-hop expansion by integrating knowledge graphs with documents, starting from an initial entity and iteratively extending relevant entities and relations (such as corporate ownership chains and technology dependency networks) via the Edge function. This process constructs structured triple paths while simultaneously retrieving and verifying document content. By tuning the width and depth parameters, the method emulates human reasoning: broadly exploring potential associations before deeply verifying high-confidence paths. FRAG [23] dynamically adjusts retrieval strategies by predicting the hop range of reasoning paths based solely on the query text, thereby enhancing retrieval quality without requiring additional fine-tuning or invocation of large language models, enabling flexible and efficient retrieval optimization. FG-RAG [32] further expands entity coverage in graph retrieval through context-aware entity expansion, providing richer background information. 
Combined with query-level fine-grained summary generation, FG-RAG transforms coarse-grained graph information into highly relevant detailed content, effectively improving the performance of query-focused summarization tasks." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 317, + 670, + 567, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 670, + 567, + 718 + ], + "spans": [ + { + "bbox": [ + 317, + 670, + 567, + 718 + ], + "type": "text", + "content": "Although differing in design from workflow-based methods, ToG-2.0 shares key advantages with other graph-structured approaches: explicitly modeling reasoning state dependencies, supporting dynamic path generation and optimization," + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 48, + 233, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 48, + 233, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 48, + 233, + 57 + ], + "type": "text", + "content": "Synergizing RAG and Reasoning: A Systematic Review" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 400, + 48, + 558, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 400, + 48, + 558, + 57 + ], + "spans": [ + { + "bbox": [ + 400, + 48, + 558, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 72, + 294, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 72, + 294, + 133 + ], + "spans": [ + { + "bbox": [ + 50, + 72, + 294, + 133 + ], + "type": "text", + "content": "and enabling closed-loop interaction between retrieval and reasoning. 
This effectively overcomes the limitations of traditional RAG in implicit relation inference and counterfactual analysis, thereby establishing an interpretable theoretical and practical framework for knowledge reasoning." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 144, + 187, + 156 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 144, + 187, + 156 + ], + "spans": [ + { + "bbox": [ + 51, + 144, + 187, + 156 + ], + "type": "text", + "content": "5.2 Reasoning Optimization" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 158, + 294, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 158, + 294, + 218 + ], + "spans": [ + { + "bbox": [ + 50, + 158, + 294, + 218 + ], + "type": "text", + "content": "In the previous chapter, we focused on introducing several approaches to integrate reasoning with RAG. This chapter shifts attention to how to augment the reasoning capabilities, specifically including Prompt-Based, Tuning-Based, and RL-Based strategies." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 228, + 295, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 228, + 295, + 346 + ], + "spans": [ + { + "bbox": [ + 50, + 228, + 295, + 346 + ], + "type": "text", + "content": "5.2.1 Prompt-Based. Prompt-Based optimization is a key approach to improving RAG and reasoning system performance by using carefully designed natural language prompts. These prompts break down complex reasoning tasks into manageable steps and guide LLMs to follow specific logical structures during generation. The main advantage is that control over reasoning flow is achieved solely through prompt design, without parameter fine-tuning or reinforcement learning, preserving the model's generalization while enhancing task-specific results." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 347, + 295, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 347, + 295, + 443 + ], + "spans": [ + { + "bbox": [ + 50, + 347, + 295, + 443 + ], + "type": "text", + "content": "This approach has three main features. First, task structuring: prompts explicitly decompose and control reasoning chains via zero-shot or templated designs. Techniques like Co-STORM [43] and WriteHere [98] use role assignments, stage divisions, and operation-specific instructions to guide multi-step reasoning—such as proposal generation, knowledge retrieval, refinement, and validation—improving interpretability by representing intermediate steps clearly." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 443, + 295, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 443, + 295, + 538 + ], + "spans": [ + { + "bbox": [ + 50, + 443, + 295, + 538 + ], + "type": "text", + "content": "Second, result reliability is improved by standardizing outputs and reducing hallucinations. Strategies include requiring citation of retrieval results, enforcing specific output formats, and integrating reflection and calibration based on retrieved knowledge. Systems like FinSearch [50] and ActiveRAG [100] incorporate temporal weighting, deduplication, and domain rules through prompts, enhancing consistency and logical coherence, especially in complex domains." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 539, + 295, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 539, + 295, + 646 + ], + "spans": [ + { + "bbox": [ + 50, + 539, + 295, + 646 + ], + "type": "text", + "content": "Third, interactive adaptability allows dynamic prompt adjustments. Special tokens (e.g., , [Web-search]) enable models to trigger tools or revise queries in real time based on intermediate results. 
Methods such as Agentic Reasoning [92] and PlanRAG [48] use context-sensitive prompts and feedback loops to refine reasoning paths dynamically, maintaining coherence and accuracy in multi-hop tasks and outperforming traditional RAG methods in complex, evolving scenarios." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 647, + 295, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 647, + 295, + 718 + ], + "spans": [ + { + "bbox": [ + 50, + 647, + 295, + 718 + ], + "type": "text", + "content": "In summary, prompt-based optimization offers an efficient, flexible, and reliable approach to enhancing RAG+Reasoning by emphasizing task structuring, result standardization, and interactive adaptability. Its non-intrusive and broadly applicable design has established it as a mainstream strategy for optimizing LLM reasoning and serves as a foundation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 72, + 559, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 559, + 192 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 559, + 192 + ], + "type": "text", + "content": "for future hybrid methods integrating fine-tuning and reinforcement learning. By systematically optimizing reasoning without altering model parameters through semantic structures, dynamic feedback, and symbolic constraints, this paradigm effectively manages macro-level controls like task decomposition and knowledge integration while addressing key challenges such as generation consistency, logical coherence, and external knowledge alignment. This makes prompt-based optimization a lightweight yet powerful solution for complex reasoning tasks." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 204, + 559, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 204, + 559, + 285 + ], + "spans": [ + { + "bbox": [ + 313, + 204, + 559, + 285 + ], + "type": "text", + "content": "5.2.2 Tuning-Based. 
The tuning-based approach improves the integration of RAG and reasoning by optimizing model parameters to internalize the retrieval-augmented chain-of-thought mechanism within LLMs. Current research mainly targets three goals: retrieval pathway optimization, structured generation enhancement, and collaborative training with external modules." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 287, + 568, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 287, + 568, + 442 + ], + "spans": [ + { + "bbox": [ + 313, + 287, + 568, + 442 + ], + "type": "text", + "content": "For retrieval pathway optimization, methods like CoRAG [83] and DeepRAG [24] build end-to-end multistep reasoning frameworks through full parameter fine-tuning and multitask learning. CoRAG expands single-step QA datasets into retrieval-reasoning chains and jointly trains tasks such as sub-query generation, intermediate answer prediction, and final composition. This boosts the model's ability to break down complex problems (e.g., multi-entity relational reasoning) and adapt retrieval strategies dynamically (e.g., query rewriting, error correction). DeepRAG combines imitation and contrastive learning with binary tree search to create efficient retrieval paths, using a DPO-style contrastive loss to reduce redundant retrieval while maintaining accuracy." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 443, + 559, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 443, + 559, + 646 + ], + "spans": [ + { + "bbox": [ + 313, + 443, + 559, + 646 + ], + "type": "text", + "content": "To improve structured generation, MCTS-KBQA [97] and Self-RAG [3] fine-tune models for precise special token generation. MCTS-KBQA uses supervised fine-tuning to make large language models output instructions that comply with knowledge graph protocols (e.g., SPARQL), modeling reasoning as executable tool-call sequences. 
Self-RAG enhances self-supervised generation control by expanding vocabulary and training the model to generate reflection tokens like retrieval triggers and relevance markers, preserving fluency and reducing factual errors. Additionally, O1-Embedder [101] and Open-RAG [38] align semantic spaces via mixed fine-tuning: O1-Embedder combines generative and contrastive training with special tokens to separate generation from embedding tasks, enhancing multihop semantic understanding; Open-RAG uses QLoRA [17] quantized fine-tuning and Mixture of Experts (MoE) modules to specialize networks for single/multi-hop reasoning." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 647, + 559, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 647, + 559, + 718 + ], + "spans": [ + { + "bbox": [ + 313, + 647, + 559, + 718 + ], + "type": "text", + "content": "In collaborative optimization with external modules, AdaptiveRAG [41] and CR-Planner [52] apply parameter isolation to balance generality and adaptability. AdaptiveRAG finetunes a lightweight classifier to select retrieval strategies dynamically. CR-Planner introduces a Critic model trained with contrastive loss on MCTS trajectory data to assess the" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "spans": [ + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "type": "text", + "content": "Gao et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 52, + 93, + 560, + 570 + ], + "blocks": [ + { + "bbox": [ + 171, + 71, + 438, + 83 + ], + "lines": [ + { + "bbox": [ + 171, + 71, + 438, + 83 + ], + "spans": [ + { + "bbox": [ + 171, + 71, + 438, + 83 + ], + "type": "text", + "content": "Table 1. Comparison of RL-based RAG with Reasoning Methods" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 93, + 560, + 570 + ], + "lines": [ + { + "bbox": [ + 52, + 93, + 560, + 570 + ], + "spans": [ + { + "bbox": [ + 52, + 93, + 560, + 570 + ], + "type": "table", + "html": "
MethodBase ModelRLParameterSupervisionReward FunctionPolicy Strategy
PORAG [73]Qwen2.5/Llama3.2GRPOQLRAORMDual rewards: \n1. Retrieval fidelity (Rfid) \n2. Response quality (Rqual) \nCombined: R = αRfid + βRqual• Group-based advantage normalization \n• PPO-style clipped objective \n• KL regularization
DeepResearcher [106]Qwen2.5-7BGRPOFullORMFormat compliance penalty (-1) + Answer F1 score• Reference policy constraints \n• KL divergence penalty
ReSearch [6]Qwen2.5-7BGRPOFullORMHybrid rewards: \n• Answer F1 (vs ground truth) \n• Format compliance check• GRPO with clip ratio 0.2 \n• Group advantage normalization (G=5) \n• β = 0.001 KL penalty
ReZero [16]Llama3.2-3BGRPOFullORM+PRM• Answer correctness \n• Format compliance \n• Search diversity \n• Chunk matching \n• Retry behavior \n• Strategy compliance• Intra-group reward comparison \n• Noise-injected robustness training \n• KL constraints
MMOA-RAG [12]Llama-3-8BMAPPOFullORMShared F1 reward + penalties: \n• Excessive sub-questions \n• Document ID errors \n• Answer hesitability• MAPPO actor-critic updates \n• Cosine learning rate scheduling
DeepNote [84]Qwen2.5/Llama3.1DPOFullORMImplicit preference modeling via likelihood contrast• Direct Preference Optimization \n• Preference gap maximization
R1-Searcher [72]Qwen2.5/Llama3.1Reinforce++FullORMTwo-stage rewards: \n1. Retrieval count + format \n2. F1 score + format penalty• RAG-based rollout \n• Retrieval-masked loss
KBQA-O1 [58]Llama3/Qwen2.5/Gemma2MCTSDoRAORM+PRMComposite reward: \n• Stepwise policy model score \n• Final reward model score• MCTS trajectory optimization \n• Q-value backpropagation
DeepRetrieval [42]Qwen2.5-3BPPOFullORMTask metrics: \n• Recall@k/NDCG \n• Syntax validity• GAE advantage estimation \n• Distributed HybridFlow
LeReT [34]Llama3-8B/Gemma-9BIPOFullPRMAverage Precision (AP) of retrieved documents• Identity Policy Optimization \n• Context distillation
SmartRAG [20]Flan-T5-L/Llama2-7BPPOFull/LoRAORMAction-specific: \n• EM+F1 for answers \n• Cost penalty for retrievals• On-policy sampling \n• PPO updates
ReARTeR [75]LLaMA3.1-8BMCTSLoRAORM+PRMMonte Carlo step scoring + TD look-ahead• Iterative preference optimization \n• KTO loss
DeepRAG [24]Qwen2.5-7B/Llama3.1-8BHybridFullORM+PRMCost-aware accuracy: \nR = -C(o) × T(st) \nC(o): Answer correctness \nT(st): Total retrieval cost• Imitation + contrastive learning \n• PPO-like calibration
RAG-Gym [96]LLaMA3.1-8BHybridLoRAPRMTriple criteria: \n• Sufficiency \n• Utility \n• Redundancy• SFT + DPO \n• PRM-guided selection
CR-Planner [52]Skywork-Llama3.1-8BMCTSLoRAPRMCritic-estimated rewards: \n• Stepwise correctness \n• Global impact• MCTS simulation \n• Pairwise ranking loss
", + "image_path": "7887c1ca647a9f89e6f4474b770fc82df542d6a89ee571cf004fffbd171081d9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 113, + 570, + 496, + 582 + ], + "lines": [ + { + "bbox": [ + 113, + 570, + 496, + 582 + ], + "spans": [ + { + "bbox": [ + 113, + 570, + 496, + 582 + ], + "type": "text", + "content": "1ORM: Outcome-based Reward Model; PRM: Process-based Reward Model. 2Full: Full parameter tuning." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 602, + 294, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 602, + 294, + 625 + ], + "spans": [ + { + "bbox": [ + 50, + 602, + 294, + 625 + ], + "type": "text", + "content": "long-term value of reasoning actions, prioritizing efficient solutions in tasks like mathematical reasoning." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 626, + 295, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 626, + 295, + 685 + ], + "spans": [ + { + "bbox": [ + 50, + 626, + 295, + 685 + ], + "type": "text", + "content": "Together, these tuning strategies restructure the parameter space to internalize retrieval-reasoning interactions effectively, enhancing the model's ability to solve complex problems while ensuring computational efficiency and broad applicability across domains." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 694, + 297, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 694, + 297, + 718 + ], + "spans": [ + { + "bbox": [ + 51, + 694, + 297, + 718 + ], + "type": "text", + "content": "5.2.3 RL-Based. 
As shown in Table 1, Reinforcement learning (RL) has recently become pivotal for tackling long-chain" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 602, + 560, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 602, + 560, + 698 + ], + "spans": [ + { + "bbox": [ + 313, + 602, + 560, + 698 + ], + "type": "text", + "content": "reasoning in modern inference models and optimizing RAG combined with reasoning tasks. Central to these advances is the use of dynamic reward mechanisms that guide LLMs to balance knowledge retrieval and logical reasoning adaptively. RL optimization objectives generally fall into two categories: outcome-based reward modeling (ORM) and process-based reward modeling (PRM), with some hybrid approaches blending both to balance global goals and local optimizations." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "text", + "content": "Synergizing RAG and Reasoning: A Systematic Review" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 399, + 47, + 559, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 47, + 559, + 57 + ], + "spans": [ + { + "bbox": [ + 399, + 47, + 559, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 72, + 294, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 72, + 294, + 228 + ], + "spans": [ + { + "bbox": [ + 50, + 72, + 294, + 228 + ], + "type": "text", + "content": "The ORM paradigm focuses solely on the quality of the final output and its adherence to standards. 
For example, R1-Searcher [72] employs a two-stage Reinforce++ [35] training where rewards in the first stage depend on correct retrieval calls and special token generation, while the second stage directly optimizes the F1 score of answers. This encourages the model to develop strategies maximizing knowledge integration, reducing hallucinations, and enhancing accuracy in multi-hop QA beyond traditional RAG methods. Similarly, KBQA-O1 [58] uses MCTS with a policy network for candidate reasoning paths and a reward model evaluating logical consistency, effectively balancing exploration and exploitation in knowledge base QA." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 228, + 295, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 228, + 295, + 347 + ], + "spans": [ + { + "bbox": [ + 50, + 228, + 295, + 347 + ], + "type": "text", + "content": "Conversely, PRM emphasizes detailed supervision of intermediate reasoning steps. LeReT [34] uses the Identity Policy Optimization (IPO) algorithm, optimizing query quality by rewarding average precision (AP) of retrieved documents, boosting retrieval recall and overall multi-hop task performance. ReARTeR [75] extends this with a step-level binary reward model, combining Monte Carlo scoring and temporal difference (TD) methods to evaluate reasoning paths proactively, reducing logical errors and redundant retrievals, and improving accuracy on benchmarks like HotpotQA." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 347, + 298, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 347, + 298, + 514 + ], + "spans": [ + { + "bbox": [ + 50, + 347, + 298, + 514 + ], + "type": "text", + "content": "Moreover, influenced by DeepSeek-R1, GRPO [69] is also gradually being applied in scenarios combining RAG and Reasoning. 
GRPO is a variant of the Proximal Policy Optimization (PPO) reinforcement learning algorithm that abandons the critic model and instead estimates the baseline from group scores, significantly reducing training resources. For example, ReZero [16] uses GRPO to introduce a \"retry\" mechanism for LLMs, incentivizing LLMs to keep trying after an initial search failure by rewarding retry search queries. This mechanism simulates the human strategy of \"if at first you don't succeed, try again\" in information retrieval. PORAG [73], based on GRPO, directly optimizes retrieval quality, contextual relevance, and generation coherence through a dual reward mechanism (retrieval fidelity and response quality)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 514, + 295, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 514, + 295, + 646 + ], + "spans": [ + { + "bbox": [ + 50, + 514, + 295, + 646 + ], + "type": "text", + "content": "Hybrid methods merge ORM and PRM to optimize both final outcomes and intermediate steps via composite rewards. SmartRAG [20] applies Proximal Policy Optimization (PPO), combining answer-level F1 rewards with penalties for excessive retrievals, balancing knowledge completeness and efficiency. RAG-Gym [96] advances this with multidimensional process rewards (sufficiency, utility, redundancy) and techniques like contrastive loss and Best-of-N sampling to promote efficient search decisions, even zero-shot. These hybrid strategies markedly lower retrieval costs while sustaining accuracy in complex tasks." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 647, + 295, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 647, + 295, + 707 + ], + "spans": [ + { + "bbox": [ + 50, + 647, + 295, + 707 + ], + "type": "text", + "content": "In addition, we can also observe that in current RL-based methods, academia focuses more on exploration with small-scale LLMs (<8B), among which the Qwen and Llama series are the most widely used. Overall, RL provides a flexible, scalable framework for integrating RAG and reasoning. ORM" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 72, + 559, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 559, + 144 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 559, + 144 + ], + "type": "text", + "content": "guides the discovery of globally optimal strategies, PRM enhances reasoning robustness via local refinements, and their combination addresses modular system limits. Future work may explore collaborative rewards in multi-agent settings, offline RL based on world models, and hierarchical reward decomposition for open-domain applications." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 155, + 524, + 167 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 155, + 524, + 167 + ], + "spans": [ + { + "bbox": [ + 314, + 155, + 524, + 167 + ], + "type": "text", + "content": "6 Downstream Tasks and Evaluation" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 171, + 559, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 171, + 559, + 278 + ], + "spans": [ + { + "bbox": [ + 313, + 171, + 559, + 278 + ], + "type": "text", + "content": "While previous chapters focused on methodologies and advances in RAG combined with reasoning, this chapter shifts to tasks and evaluation. It provides a comprehensive overview and analysis of existing tasks, datasets, their current status, and emerging trends. 
By reviewing these resources, we highlight the landscape's gaps and limitations in current evaluation methods. The chapter also explores key challenges in assessment frameworks, identifying shortcomings and suggesting potential improvements." + } + ] + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 317, + 291, + 558, + 498 + ], + "blocks": [ + { + "bbox": [ + 317, + 291, + 558, + 498 + ], + "lines": [ + { + "bbox": [ + 317, + 291, + 558, + 498 + ], + "spans": [ + { + "bbox": [ + 317, + 291, + 558, + 498 + ], + "type": "image", + "image_path": "1e880ccde31a88477a8599518908e126cc979da6226fba098d13340d6687a5c6.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 510, + 559, + 569 + ], + "lines": [ + { + "bbox": [ + 313, + 510, + 559, + 569 + ], + "spans": [ + { + "bbox": [ + 313, + 510, + 559, + 569 + ], + "type": "text", + "content": "Figure 7. The current downstream tasks and datasets related to the combination of RAG and Reasoning show that multi-hop question answering tasks still dominate. Correspondingly, HotpotQA, 2WikiMultihopQA, and MuSiQue remain the most commonly used evaluation datasets." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 596, + 465, + 607 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 596, + 465, + 607 + ], + "spans": [ + { + "bbox": [ + 314, + 596, + 465, + 607 + ], + "type": "text", + "content": "6.1 Knowledge-Intensive Tasks" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 610, + 559, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 610, + 559, + 718 + ], + "spans": [ + { + "bbox": [ + 313, + 610, + 559, + 718 + ], + "type": "text", + "content": "In the evaluation for RAG systems, knowledge-intensive question answering (QA) remains the primary focus (Figure 7). 
As LLMs improve in semantic understanding and reasoning, benchmarks have expanded to cover tasks from simple fact retrieval to complex multi-step reasoning. However, evaluation methods specifically designed for RAG lag behind due to the dual challenge of assessing both retrieval-generation coherence and adaptability to dynamic knowledge bases. For example, multi-hop QA requires integrating" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "spans": [ + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "type": "text", + "content": "Gao et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 72, + 294, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 72, + 294, + 144 + ], + "spans": [ + { + "bbox": [ + 50, + 72, + 294, + 144 + ], + "type": "text", + "content": "dispersed knowledge through multi-stage retrieval while verifying logical consistency between answers and retrieval paths. This complexity increases dataset construction costs compared to purely generative tasks, keeping research centered on knowledge-intensive QA subcategories such as open-domain QA, knowledge-base QA, and multi-hop QA." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 144, + 310, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 144, + 310, + 348 + ], + "spans": [ + { + "bbox": [ + 50, + 144, + 310, + 348 + ], + "type": "text", + "content": "Commonly used datasets include Natural Questions (NQ) [47] for single-hop factual queries, HotpotQA, 2WikiMultiHopQA [31] and Musique [79] for multi-hop QA. These benchmarks are mostly based on Wikipedia and fail to reflect the RAG demands and corresponding complexity in real-world scenarios. Some efforts have pushed evaluation boundaries, like CRUD-RAG's [59] operational metrics and DomainRAG's [86] domain-specific evaluations, but high costs and metric-task interdependencies limit progress. As a result, knowledge-intensive QA remains central for testing RAG robustness and practicality, highlighting a critical bottleneck: the need for innovative frameworks that balance retrieval flexibility and controlled generation to support new developments like Agentic RAG. Overall, many evaluation benchmarks are lagging behind rapid RAG+Reasoning advances, especially as LLMs grow more powerful. Specifically, the current evaluation of RAG faces the following challenges." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 356, + 294, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 356, + 294, + 440 + ], + "spans": [ + { + "bbox": [ + 50, + 356, + 294, + 440 + ], + "type": "text", + "content": "Limited Challenge. With improving LLM capabilities, many knowledge-based questions are no longer difficult, as they can be answered without external retrieval. Current multi-hop reasoning datasets, often built from artificial templates, offer limited challenge. There is an urgent need for more complex datasets reflecting real-world scenarios and practical use." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 449, + 294, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 449, + 294, + 521 + ], + "spans": [ + { + "bbox": [ + 50, + 449, + 294, + 521 + ], + "type": "text", + "content": "Lack of Specificity. Existing evaluation tasks are still predominantly focused on factual assessment and knowledge retrieval, lacking evaluations that probe deeper analytical thinking. This constraint limits the ability to measure a model's capacity for profound reasoning and cognitive depth." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 529, + 294, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 529, + 294, + 601 + ], + "spans": [ + { + "bbox": [ + 50, + 529, + 294, + 601 + ], + "type": "text", + "content": "Task Uniformity. The majority of benchmarks are overly dependent on QA tasks, focusing on reactive, question-and-answer-based interactions. There is a pressing need to introduce tasks aligned with real-world applications, such as active information retrieval tasks based on personal knowledge or proactive knowledge discovery." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 610, + 294, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 610, + 294, + 718 + ], + "spans": [ + { + "bbox": [ + 50, + 610, + 294, + 718 + ], + "type": "text", + "content": "Insufficient Dimensions. Evaluations are primarily end-to-end, focusing solely on final outcomes. However, with the introduction of reasoning processes, RAG+Reasoning systems have become iterative, multi-step frameworks. Current evaluations are unable to assess intermediate reasoning steps or retrieval chains effectively. The absence of step-by-step supervision data limits both research and training of related methods. 
Furthermore, current evaluation methodologies lack comprehensive assessments of system performance" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 72, + 558, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 558, + 96 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 558, + 96 + ], + "type": "text", + "content": "trade-offs, such as computational cost and efficiency, which are critical for practical deployment." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 96, + 559, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 96, + 559, + 192 + ], + "spans": [ + { + "bbox": [ + 313, + 96, + 559, + 192 + ], + "type": "text", + "content": "This emergent landscape necessitates the creation of a new generation of evaluation frameworks that can address these shortcomings. Such frameworks must not only ensure the adaptability of retrieval and the controllability of generation but also integrate intermediate reasoning evaluation and efficiency metrics, paving the way for the development of more robust and efficient RAG systems suited to diverse real-world applications." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 204, + 480, + 216 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 204, + 480, + 216 + ], + "spans": [ + { + "bbox": [ + 314, + 204, + 480, + 216 + ], + "type": "text", + "content": "6.2 New Tasks on RAG+Reasoning" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 219, + 559, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 219, + 559, + 386 + ], + "spans": [ + { + "bbox": [ + 313, + 219, + 559, + 386 + ], + "type": "text", + "content": "Recently, combining RAG with reasoning has significantly improved models' ability to tackle more realistic and challenging tasks, raising the standards for evaluation methods. 
This subsection examines emerging tasks that assess their combined strengths, related tasks and datasets are shown in Table 2. Here, \"emerging\" refers not to entirely new tasks but to those with unprecedented complexity and demands. These include Deep Research tasks requiring multi-layered information integration and reasoning; PhD (Expert)-Level Complex Reasoning tasks targeting advanced scenario reasoning; and critical; domain-specific decision support tasks like medical diagnosis and legal analysis. Such tasks demand not only external knowledge retrieval but also logical consistency, coherence, and depth in reasoning." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 396, + 559, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 396, + 559, + 478 + ], + "spans": [ + { + "bbox": [ + 313, + 396, + 559, + 478 + ], + "type": "text", + "content": "6.2.1 Deep Research. From the perspective of integrating RAG and reasoning, Deep Research tasks exemplify complex downstream applications. They require models to handle open-ended retrieval, produce long-form, structured text, and synthesize multi-source information through deep reasoning. This section analyzes their key features, evaluation datasets, and metrics." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 479, + 559, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 479, + 559, + 514 + ], + "spans": [ + { + "bbox": [ + 313, + 479, + 559, + 514 + ], + "type": "text", + "content": "At the core of Deep Research tasks lies the mission of addressing complex informational queries. 
These tasks are distinguished by several key attributes:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 514, + 559, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 514, + 559, + 586 + ], + "spans": [ + { + "bbox": [ + 313, + 514, + 559, + 586 + ], + "type": "text", + "content": "First, dynamic interactivity is essential. Models engage in iterative dialogue to uncover latent user needs or \"unknown unknowns\". For example, the Co-Storm [43] framework enables collaboration with multiple language model agents to explore information gradually, easing user cognitive load and capturing unmet needs more accurately." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 586, + 559, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 586, + 559, + 646 + ], + "spans": [ + { + "bbox": [ + 313, + 586, + 559, + 646 + ], + "type": "text", + "content": "Second, integrating information from multiple sources is crucial. Models must consolidate diverse data to provide comprehensive coverage. For instance, uses dynamic mind maps to structure knowledge and produce cohesive reports, ensuring accuracy and completeness." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 647, + 559, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 647, + 559, + 718 + ], + "spans": [ + { + "bbox": [ + 313, + 647, + 559, + 718 + ], + "type": "text", + "content": "Third, expert-level accuracy is required. Many tasks demand domain expertise, expecting models to perform like human specialists. The Agentic Reasoning [92] framework illustrates this with high-stakes scenarios like medical treatment design or legal analysis, where outputs are judged on correctness, depth, and coherence." 
+ } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "text", + "content": "Synergizing RAG and Reasoning: A Systematic Review" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 399, + 47, + 558, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 47, + 558, + 57 + ], + "spans": [ + { + "bbox": [ + 399, + 47, + 558, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 50, + 92, + 560, + 764 + ], + "blocks": [ + { + "bbox": [ + 129, + 71, + 481, + 84 + ], + "lines": [ + { + "bbox": [ + 129, + 71, + 481, + 84 + ], + "spans": [ + { + "bbox": [ + 129, + 71, + 481, + 84 + ], + "type": "text", + "content": "Table 2. Tasks and Datasets under the New Trend of RAG Combined with Reasoning" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 50, + 92, + 560, + 764 + ], + "lines": [ + { + "bbox": [ + 50, + 92, + 560, + 764 + ], + "spans": [ + { + "bbox": [ + 50, + 92, + 560, + 764 + ], + "type": "table", + "html": "
Task TypeSub-TaskDatasetDescriptionScaleConstruction ByEvaluationPaper
Deep ResearchDeep ResearchAgentic ReasoningPHD-level dataset covering finance, medicine, and law.15-30 domainsPhD ExpertsExpert pass rate[92]
Report Genera-tionWildSeek [44]Info-seeking task-goal pairs for document generation.100 samplesRules/LLM/ManualLLM[98]
Report Genera-tionTELL ME A STORY [37]fiction writing evaluation dataset: detailed prompts and long-form narratives.230 samplesManualLLM[98]
Peer ReviewReview-5k [91]ICLR 2024 peer review dataset: paper metadata and structured reviewer feedback.4,991 papersOpenReview/arXivMSE/MAE/Acc[91]
Report Genera-tionResearch-14k [91]2022-2024 Accepted ML pa-pers: outlines, full texts, and cited abstracts.14,911 papersSemantic Scholar + arXivSimulated review scores[91]
Report Genera-tionSolutionBench [54]Engineering benchmark: constrained solutions across 8 real-world domains.1,050 datapointsManual/LLM ex-tractionAnalytical/ Tech-nical scores[54]
Mathematics & ReasoningMath ReasoningGPQA [67]PHD-level MCQs in physics, chemistry, and biology.744 setsPhD ExpertsAccuracy[92]
Math ReasoningMATH500 [55]500 math problems from the MATH test set.500 problemsPublic reposPass@K[51]
ProgrammingLiveCodeBench [40]Programming benchmark with easy, medium, and hard problems.1,055 problemsCompetition plat-formsPass@K[51]
ProgrammingUSACO [70]USA Computing Olympiad problems, testing algorithms and coding.307 problemsUSA Computing OlympiadPass@K[52]
Math ReasoningTheoremQA-Math [33]BRIGHT subset: theorem-based math problems.206 problemsSTEM datasetsAccuracy[52]
ProgrammingGorilla [64]API-aware code generation from HuggingFace, Torch Hub, TensorFlow Hub docs.1,600 APIsManualAST matching[73]
Math ReasoningOlympiadBench [29]Olympiad-level math compe-tition problems.1,000 problemsCompetitionsAccuracy/F1[109]
Complex Reason-ingComplexWebQA [76]Multi-step reasoning over web queries with cross-document integration.34,689 queriesWeb snippetsAccuracy[36]
Demanding RetrievalDomain RetrievalStackEcon & Stack-Bio [33]Biology and economics StackExchange questions for complex retrieval.206 queriesStackExchangenDCG@K[52]
Active RetrievalAR-Bench [14]Active retrieval benchmark with four sub-tasks.8k/sub-taskSyntheticAccuracy[14]
Real-timeTAQA [104]QA dataset with time-evolving answers.10K-100K rowsHuman-curatedLLM[14]
Real-timeFreshQA [80]Dynamic fact QA benchmark with evolving answers.600 samplesMixed sourcesLLM[14]
Domain RetrievalPubMed [42]PICO-based medical search dataset linking reviews to PubMed.21k+ samplesSystematic re-viewsRecall@K[42]
Domain RetrievalTrial search [42]PICO-based clinical trial search linked to ClinicalTrials.gov.7k+ samplesManuallyRecall@K[42]
Domain RetrievalFinSearchBench-24 [50]Financial retrieval benchmark covering stocks, rates, policy, trends.1,500 queriesManuallyAccuracy[50]
Decision & QABusinessDQA [48]Decision QA benchmark with business scenarios in enterprise settings.301 pairsvideo gamesAccuracy[48]
MedicalCMB-Clin [87]CMB subset for clinical diagnosis reasoning in Chinese medical cases.74 casesTextbooks/diagnosticLLM/Expert materials[11]
MedicalMM-Cases [11]Medicine cases generated by GPT-40-mini, verified by doctors.609 casesLLM/doctor-reviewedLLM/Expert[11]
MedicalTCM-Cases [11]TCM patient cases generated by GPT-40-mini, verified by doctors.130 casesLLM/doctor-reviewedLLM/Expert[11]
", + "image_path": "a92e1ff229df312dcac56e5cfb3e551bc2263f588f8e2558c8d7b588227aff0d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "spans": [ + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "type": "text", + "content": "Gao et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 72, + 294, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 72, + 294, + 119 + ], + "spans": [ + { + "bbox": [ + 53, + 72, + 294, + 119 + ], + "type": "text", + "content": "Fourth, multi-modal reasoning is often necessary. Deep Research tasks involve varied data types—text, code, knowledge graphs—and dynamic tool use such as web searches or code execution to enhance reasoning." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 121, + 294, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 121, + 294, + 190 + ], + "spans": [ + { + "bbox": [ + 53, + 121, + 294, + 190 + ], + "type": "text", + "content": "Finally, handling multiple real-world constraints is vital. Tasks may require generating practical solutions under specific conditions, like designing hospitals in challenging environments with factors like heavy rainfall and seismic activity, as seen in the DeepSolution framework. This ensures outputs are feasible and relevant." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 192, + 294, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 192, + 294, + 227 + ], + "spans": [ + { + "bbox": [ + 53, + 192, + 294, + 227 + ], + "type": "text", + "content": "To ensure the diversity and complexity of Deep Research tasks, their evaluation relies on datasets drawn from multiple domains. A few notable examples include:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 228, + 294, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 228, + 294, + 346 + ], + "spans": [ + { + "bbox": [ + 53, + 228, + 294, + 346 + ], + "type": "text", + "content": "WildSeek Dataset [44]: This dataset is constructed from real-world user information-seeking scenarios and comprises 100 data points covering 24 fields, including economics, computer science, and law. Each data point is characterized by a topic, user goal, and domain label. For example: \"Domain: Economics; Topic: Development of a Shared Trading Currency; Goal: Investigate how a new shared currency could eliminate transaction costs\". WildSeek effectively evaluates models' competence in dynamic interaction and multi-source information integration." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 347, + 294, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 347, + 294, + 526 + ], + "spans": [ + { + "bbox": [ + 53, + 347, + 294, + 526 + ], + "type": "text", + "content": "GAIA [62]. The GAIA Benchmark, developed jointly by Meta AI, Hugging Face, and others, is a comprehensive evaluation framework designed to assess general AI assistants' ability to handle real-world problems. It features 466 carefully crafted tasks spanning language reasoning, visual perception, multi-agent collaboration, and adaptability, focusing on key skills like reasoning, multimodal processing, web browsing, and tool use. 
GAIA measures performance across dimensions such as task execution, adaptability, collaboration, generalization, and real-world reasoning with metrics like completion rate, response quality, efficiency, and robustness. Unlike traditional benchmarks, it emphasizes robustness and reliability in everyday scenarios, supports zero-shot evaluation, prevents data contamination, and is widely used in research and industry to guide AI development." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 527, + 294, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 527, + 294, + 634 + ], + "spans": [ + { + "bbox": [ + 53, + 527, + 294, + 634 + ], + "type": "text", + "content": "SolutionBench [54]: This dataset spans eight engineering domains, including environmental, mining, and transportation engineering. Each instance presents a complex engineering problem with specific constraints. For example: \"Design a safe and efficient hospital construction plan in a region with 3000mm annual rainfall, expansive soils, and frequent seismic activity.\" SolutionBench evaluates models' ability to address multi-constraint problems and integrate specialized knowledge effectively." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 635, + 294, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 635, + 294, + 705 + ], + "spans": [ + { + "bbox": [ + 53, + 635, + 294, + 705 + ], + "type": "text", + "content": "The current evaluation system for DeepResearch faces the dual challenges of scarce specialized testing tasks and the difficulty of assessing complex, lengthy reports: On one hand, existing benchmark tests only cover basic capabilities and lack systematic evaluation standards in specialized scenarios like business analysis and policy assessment; on the" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 317, + 72, + 558, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 72, + 558, + 144 + ], + "spans": [ + { + "bbox": [ + 317, + 72, + 558, + 144 + ], + "type": "text", + "content": "other hand, the multimodal integration, logical chain verification, and domain adaptability testing of long reports pose technical bottlenecks for traditional assessment methods, necessitating the development of new evaluation tools that integrate logic graphs, dynamic scenario simulation, and domain knowledge bases." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 317, + 144, + 558, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 144, + 558, + 251 + ], + "spans": [ + { + "bbox": [ + 317, + 144, + 558, + 251 + ], + "type": "text", + "content": "In the future, the evaluation system will evolve into a multidimensional framework, including the construction of a three-level indicator matrix covering basic capabilities, reasoning levels, and application value. Overcoming these evaluation bottlenecks requires both technological innovation and joint standard-building efforts. This concerns not only the reliability validation of intelligent research tools but also the reshaping of research evaluation paradigms and industrial application boundaries." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 317, + 264, + 558, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 264, + 558, + 525 + ], + "spans": [ + { + "bbox": [ + 317, + 264, + 558, + 525 + ], + "type": "text", + "content": "6.2.2 PhD (Expert)-Level Complex Reasoning. The integration of RAG with advanced reasoning has become essential for tackling expert-level, complex cognitive tasks, particularly at the PhD level. These tasks, including competitive programming, theorem-driven proof reasoning, and cross-disciplinary knowledge retrieval, require multi-layered logical inference and precise coordination between dynamic retrieval and domain-specific knowledge. PhD-level reasoning differs from standard evaluations across three dimensions: knowledge intensity, procedural rigor, and domain specificity. Knowledge intensity demands dynamic access to deep, specialized knowledge, such as analyzing dynamic programming time complexity or applying algebraic topology theorems—needs that surpass general corpora and call for domain-specific knowledge graphs and retrieval methods. Procedural rigor involves mathematical precision in multistep proofs, requiring logical consistency in symbolic manipulation, theorem use, and counterexample refutation, as seen in international math competitions. Domain specificity reflects tailored reasoning methods, e.g., handling synchronization in concurrent programming or employing tensor calculus in quantum field theory." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 317, + 527, + 558, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 527, + 558, + 658 + ], + "spans": [ + { + "bbox": [ + 317, + 527, + 558, + 658 + ], + "type": "text", + "content": "Evaluation systems for such tasks are inherently multilayered and multimodal. 
The USACO Benchmark [71] offers a graduated difficulty scale for programming reasoning, testing both correctness and algorithmic constraints like time complexity. TheoremQA-Math [9] links formalized math problems to theorem libraries, demanding verifiable mappings between theorem applications and calculations. Cross-disciplinary datasets like StackBio and StackEcon [53] assess models' ability to extract critical knowledge from dense, domain-rich documents, serving as strong tests for domain-oriented retrieval accuracy." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 317, + 658, + 558, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 658, + 558, + 717 + ], + "spans": [ + { + "bbox": [ + 317, + 658, + 558, + 717 + ], + "type": "text", + "content": "Modern evaluation surpasses traditional end-to-end tests by combining process and outcome validation. Frameworks like CR-Planner [52] use dual models—a Sub-Goal Critic to score reasoning chains and an Execution Critic to evaluate retrieval—allowing fine-grained step monitoring. 
For" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 48, + 232, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 48, + 232, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 48, + 232, + 57 + ], + "type": "text", + "content": "Synergizing RAG and Reasoning: A Systematic Review" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 400, + 48, + 558, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 400, + 48, + 558, + 57 + ], + "spans": [ + { + "bbox": [ + 400, + 48, + 558, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 72, + 294, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 72, + 294, + 178 + ], + "spans": [ + { + "bbox": [ + 53, + 72, + 294, + 178 + ], + "type": "text", + "content": "example, in dynamic programming, key steps like formulating state transitions and retrieving boundary conditions receive targeted feedback. Similarly, Search-O1 [51] quantifies knowledge completeness by tracking uncertainty indicators (e.g., tentative language), measuring confidence and accuracy. Outcome validation maintains strict correctness benchmarks in programming and combines metrics like F1 scores with expert review in open-domain scientific QA to ensure precise understanding of domain-specific terms." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 190, + 227, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 190, + 227, + 201 + ], + "spans": [ + { + "bbox": [ + 53, + 190, + 227, + 201 + ], + "type": "text", + "content": "6.3 Challenges and Future Directions" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 205, + 294, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 205, + 294, + 370 + ], + "spans": [ + { + "bbox": [ + 53, + 205, + 294, + 370 + ], + "type": "text", + "content": "6.3.1 Complex Domain Tasks. Recent advances in RAG have provided novel solutions for more complex tasks in professional domains. These downstream tasks transcend the limitations of traditional question-answering models that rely solely on simple retrieval-generation patterns, involving challenges such as real-time information acquisition, integration of domain expertise, and dynamic decision-making support. The nature of these tasks can be characterized along three interrelated dimensions: (1) temporal dynamics, emphasizing the rapid changes in data and reasoning environment; (2) domain specificity, focusing on deep integration of industry knowledge and structured data; and (3) reasoning chain complexity, reflecting requirements for multi-stage reasoning and fine-grained decomposition of queries." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 372, + 301, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 372, + 301, + 597 + ], + "spans": [ + { + "bbox": [ + 53, + 372, + 301, + 597 + ], + "type": "text", + "content": "To rigorously evaluate such systems, innovative benchmarking approaches have been proposed. 
The FinSearchBenchmark-24 dataset, for example, encompasses five months of market data variations, integrating multi-variable interactions across stock, policy, and industrial sectors, and includes over 1,500 multiple-choice questions, thereby surpassing the constraints of traditional static benchmarks. The evaluation adopts a hierarchical and quantitative methodology: the foundational level measures model accuracy and response latency; the intermediate layer assesses the temporal sensitivity of information relevance and the contribution of retrieval mechanisms to reasoning outcomes; and the advanced layer employs ablation studies to highlight performance variances under dynamic temporal decay. This multifaceted evaluation not only differentiates surface-level retrieval capabilities but also rigorously measures the synergy between reasoning quality and temporal context, furnishing theoretical and practical foundations for long-term stability and predictive accuracy in complex domain systems." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 599, + 294, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 599, + 294, + 717 + ], + "spans": [ + { + "bbox": [ + 53, + 599, + 294, + 717 + ], + "type": "text", + "content": "Experimental findings further reveal that establishing long-term evaluation protocols with temporal weighting functions is indispensable for adapting to realistic dynamic environments. Nonlinear declines in decision accuracy, observed when extending relevance windows from 72 to 168 hours, emphasize the importance of factoring temporal decay into assessment frameworks. 
Future work should extend these evaluation protocols to high-stakes domains such as medical diagnostics and legal consultation, where the standardization of interpretability metrics will critically support" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 317, + 73, + 557, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 73, + 557, + 95 + ], + "spans": [ + { + "bbox": [ + 317, + 73, + 557, + 95 + ], + "type": "text", + "content": "the evolution of RAG+ reasoning systems toward robust and trustworthy decision-assistance platforms." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 317, + 105, + 558, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 105, + 558, + 402 + ], + "spans": [ + { + "bbox": [ + 317, + 105, + 558, + 402 + ], + "type": "text", + "content": "6.3.2 Decision Support and Active Retrieval. The expansion of RAG+Reasoning frameworks into specialized tasks has fostered two complementary research paradigms: decision optimization and active retrieval. In the decision optimization category, systems must leverage heterogeneous structured data, rule bases, and objective functions to formulate optimal strategies. Representative systems like PlanRAG formalize Decision Question Answering (Decision QA) tasks targeting enterprise-level scenarios including supply chain optimization, industrial resource allocation, and market price regulation. These tasks require planning multimodal reasoning paths where models iteratively retrieve data from relational and graph databases, integrate intricate business rules, and iteratively refine decision-making paths through replanning mechanisms. To evaluate such capabilities, the Decision QA (DQA) benchmark creates dual database versions (MySQL and Neo4j) derived from economic systems in strategy games, assessing cross-structured generalization. 
The evaluation consists of a three-tier framework: the core tier measures answer accuracy; the intermediate layer diagnoses error types to identify system bottlenecks; and the foundational tier focuses on retrieval efficiency and the impact of replanning frequency. This structured evaluation framework not only tracks performance but also offers actionable insights for system refinement." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 317, + 403, + 558, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 403, + 558, + 510 + ], + "spans": [ + { + "bbox": [ + 317, + 403, + 558, + 510 + ], + "type": "text", + "content": "Conversely, the active retrieval evaluation addresses the challenge of dynamically determining when and how to invoke retrieval under complex multimodal contexts. Unlike rigid traditional RAG systems, UAR applies lightweight classifiers for fast, accurate triggers, improving performance in time-sensitive or creative tasks. Tested on AR-Bench, it combines binary trigger accuracy with GPT assessments, exact matches, and human reviews, boosting adaptability across diverse contexts." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 317, + 511, + 558, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 511, + 558, + 628 + ], + "spans": [ + { + "bbox": [ + 317, + 511, + 558, + 628 + ], + "type": "text", + "content": "Emerging trends in these evaluation paradigms indicate a shift from static, rule-based frameworks to dynamic system simulations, as exemplified by DQA's use of game engine-generated datasets to simulate realistic environments. Similarly, active retrieval tasks progress from simple retrieval trigger decisions toward collaborative multi-criteria decision-making. Evaluation methodologies are concurrently evolving from singular performance metrics to multidimensional matrices comprising core effectiveness, diagnostic error distributions, and economic cost measures." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 317, + 643, + 410, + 654 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 643, + 410, + 654 + ], + "spans": [ + { + "bbox": [ + 317, + 643, + 410, + 654 + ], + "type": "text", + "content": "7 Cost and Risk" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 317, + 659, + 558, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 659, + 558, + 717 + ], + "spans": [ + { + "bbox": [ + 317, + 659, + 558, + 717 + ], + "type": "text", + "content": "Integrating reasoning into RAG systems is neither effortless nor purely beneficial. Recent trends have exaggerated its advantages while downplaying the costs and risks. This trade-off between performance and cost is crucial. This section examines the expenses and misuse risks linked to adding" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 48, + 211, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 48, + 211, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 48, + 211, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 525, + 48, + 558, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 48, + 558, + 56 + ], + "spans": [ + { + "bbox": [ + 525, + 48, + 558, + 56 + ], + "type": "text", + "content": "Gao et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 70, + 558, + 266 + ], + "blocks": [ + { + "bbox": [ + 53, + 70, + 558, + 266 + ], + "lines": [ + { + "bbox": [ + 53, + 70, + 558, + 266 + ], + "spans": [ + { + "bbox": [ + 53, + 70, + 558, + 266 + ], + "type": "image", + "image_path": "5456685368ffe44fb4c5b81029bd5ef81d13e2a9f1ac24b37bf34bf87ce8844d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 71, + 275, + 537, + 288 + ], + "lines": [ + { + "bbox": [ + 71, + 275, + 537, + 288 + ], + "spans": [ + { + "bbox": [ + 71, + 275, + 537, + 288 + ], + "type": "text", + "content": "Figure 8. From LLM to RAG and then to RAG+Reasoning, performance improvement comes with additional cost." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 304, + 295, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 304, + 295, + 363 + ], + "spans": [ + { + "bbox": [ + 50, + 304, + 295, + 363 + ], + "type": "text", + "content": "reasoning to RAG systems. As shown in Figure 8, the cost of moving from LLM to RAG, then to RAG + Reasoning, incurs an inevitable \"invisible tax\". Though often hidden by performance gains, this cost is vital in assessing these methods' overall practicality and efficiency." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 363, + 295, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 363, + 295, + 483 + ], + "spans": [ + { + "bbox": [ + 50, + 363, + 295, + 483 + ], + "type": "text", + "content": "The shift from LLM to RAG moves from simplicity to enhanced knowledge handling by incorporating external information. A basic LLM provides direct, efficient answers with low latency and token use but is limited to pre-trained knowledge, restricting complex or up-to-date queries. 
RAG overcomes this by adding a vector database for external retrieval, vastly expanding response scope and reliability. However, this requires substantial data processing, storage, and introduces higher latency and token costs due to data chunking, encoding, indexing, and retrieval overhead." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 483, + 295, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 483, + 295, + 616 + ], + "spans": [ + { + "bbox": [ + 50, + 483, + 295, + 616 + ], + "type": "text", + "content": "Advancing from RAG to RAG + Reasoning adds multistep reasoning capabilities, enabling complex task handling, autonomous decisions, and more context-aware responses through intricate reasoning. This comes at the expense of increased delays, token consumption, processing demands, and greater complexity in system integration and maintenance. The reasoning layer's autonomy also brings opaqueness, unpredictability, and heightened security and reliability risks. These challenges highlight the necessity of carefully balancing effectiveness against costs when adopting RAG + Reasoning in real-world applications." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 632, + 231, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 632, + 231, + 643 + ], + "spans": [ + { + "bbox": [ + 51, + 632, + 231, + 643 + ], + "type": "text", + "content": "7.1 Cost Trade-off in RAG+Reasoning" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 647, + 295, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 647, + 295, + 718 + ], + "spans": [ + { + "bbox": [ + 50, + 647, + 295, + 718 + ], + "type": "text", + "content": "Figure 9 illustrates typical works combining RAG and Reasoning, showing retrieval and reasoning demands alongside token consumption. 
While integrating dynamic knowledge retrieval with multi-step reasoning greatly improves accuracy in more complex tasks, the resulting systemic costs are often underestimated in research and practice. These costs" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 304, + 560, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 304, + 560, + 411 + ], + "spans": [ + { + "bbox": [ + 313, + 304, + 560, + 411 + ], + "type": "text", + "content": "grow non-linearly, causing serious efficiency bottlenecks in real-world use. The tradeoff between effectiveness and efficiency stems from RAG+Reasoning's architecture: multistage task decoupling, dynamic path planning, and intermediate state preservation. These features improve reasoning quality but trigger cascading increases in computational resources, token usage, and reduced retrieval efficiency. This section explores these implicit tradeoffs from the angles of resource use, token consumption, and retrieval efficiency." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 421, + 569, + 718 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 313, + 421, + 561, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 421, + 561, + 636 + ], + "spans": [ + { + "bbox": [ + 313, + 421, + 561, + 636 + ], + "type": "text", + "content": "7.1.1 Non-Linear Growth of Computational Resources. The RAG+Reasoning framework separates retrieval and reasoning into multiple stages, causing computational demands to grow non-linearly. Dynamic chain-of-reasoning methods execute multiple LLM generations and retrievals per inference, resulting in complexity far exceeding baseline models. Fixed-length reasoning chains trigger repeated retrieval and generation calls, increasing resource needs with task complexity. 
More advanced techniques like MCTS-guided methods add rounds of candidate path generation and evaluation, further multiplying runtime and memory usage on GPUs compared to linear methods. Even simpler multi-step planning tasks incur much higher overhead than single-stage retrieval models due to extra graph construction and analysis. While this resource intensity improves inference accuracy, it poses serious scalability challenges under limited resources as computational costs grow superlinearly with model size, retrieval chain length, and task complexity." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 647, + 569, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 647, + 569, + 718 + ], + "spans": [ + { + "bbox": [ + 313, + 647, + 569, + 718 + ], + "type": "text", + "content": "7.1.2 Implicit Token Inflation. Multi-step reasoning frameworks inherently cause significant token inflation through iterative intermediate processes like thought chains, retrieved documents, and verification feedback. 
Active learning setups consolidate multiple intermediate results—retrieved documents, counterfactuals, multi-round validations—leading to" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "text", + "content": "Synergizing RAG and Reasoning: A Systematic Review" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 399, + 47, + 559, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 47, + 559, + 57 + ], + "spans": [ + { + "bbox": [ + 399, + 47, + 559, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 70, + 560, + 380 + ], + "blocks": [ + { + "bbox": [ + 52, + 70, + 560, + 380 + ], + "lines": [ + { + "bbox": [ + 52, + 70, + 560, + 380 + ], + "spans": [ + { + "bbox": [ + 52, + 70, + 560, + 380 + ], + "type": "image", + "image_path": "43b9c3e6021b5e7e0d22dfa723c29a2ecc7d344929cf0de432c2f1383653b5ca.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 153, + 390, + 456, + 403 + ], + "lines": [ + { + "bbox": [ + 153, + 390, + 456, + 403 + ], + "spans": [ + { + "bbox": [ + 153, + 390, + 456, + 403 + ], + "type": "text", + "content": "Figure 9. 
Cost quadrant diagram of retrieval and reasoning requirements" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 418, + 296, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 418, + 296, + 539 + ], + "spans": [ + { + "bbox": [ + 50, + 418, + 296, + 539 + ], + "type": "text", + "content": "token usage well beyond typical limits. Chain-based retrieval also generates token bloat due to exhaustive candidate path exploration. Iterative reasoning path selection, expansion, and evaluation add heavy token overhead in tasks needing deep reasoning chains involving extensive sequence generation and evaluation. Token usage grows exponentially with task complexity and increases further when intermediate reasoning favors depth or breadth. This inflation raises API costs and memory demands, especially in long-text generation like Deep Research [106]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 551, + 298, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 551, + 298, + 718 + ], + "spans": [ + { + "bbox": [ + 50, + 551, + 298, + 718 + ], + "type": "text", + "content": "7.1.3 Marginal Decline in Retrieval Efficiency. Dynamic retrieval improves knowledge precision but suffers diminishing efficiency as task complexity increases. Adaptive methods reduce retrievals for simple tasks but still require multiple iterations for complex ones, adding significant overhead compared to standard RAG. The tradeoff between retrieval quality and frequency further limits efficiency. High-accuracy retrieval methods incur heavy computational and time costs, negating their efficiency benefits. Even advanced retrieval-trigger optimizations can't fully remove this overhead due to extra training and deployment costs [41]. 
This natural efficiency ceiling highlights ongoing challenges in balancing retrieval accuracy and resource use, especially in large, complex tasks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 418, + 560, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 418, + 560, + 622 + ], + "spans": [ + { + "bbox": [ + 313, + 418, + 560, + 622 + ], + "type": "text", + "content": "7.1.4 Toward a Cost Model Framework. Against this backdrop, the development of fine-grained cost models becomes a necessary precondition for balancing effectiveness and efficiency. Existing evaluation metrics, which often rely on single-task performance indicators (such as Exact Match or F1) or coarse-grained runtime statistics, lack the comprehensiveness to jointly model computational resources, token flow, and retrieval overhead. Consequently, they fail to quantify the true tradeoffs in reasoning mechanisms. For instance, while multi-hop reasoning may improve task accuracy, these improvements are frequently offset by exponential growth in token consumption and latency relative to baseline methods. A fine-grained cost model would enable researchers and practitioners to more accurately evaluate the real benefits of reasoning-centric frameworks while addressing the underexplored interplay between computational cost and task performance." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 314, + 632, + 488, + 643 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 632, + 488, + 643 + ], + "spans": [ + { + "bbox": [ + 314, + 632, + 488, + 643 + ], + "type": "text", + "content": "7.2 Potential Risk of Over-Thinking" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 646, + 560, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 646, + 560, + 718 + ], + "spans": [ + { + "bbox": [ + 313, + 646, + 560, + 718 + ], + "type": "text", + "content": "In the process of developing deep thinking models, \"overthinking\" poses a key risk to system efficiency and reliability [10, 15, 19, 30, 74, 81], and this issue is further amplified after combining with RAG. It appears as redundant reasoning steps, excessive validation of known conclusions, or unnecessarily broad retrieval scopes, wasting computational" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "spans": [ + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "type": "text", + "content": "Gao et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 72, + 294, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 72, + 294, + 227 + ], + "spans": [ + { + "bbox": [ + 53, + 72, + 294, + 227 + ], + "type": "text", + "content": "resources, increasing error propagation, and degrading performance. 
For example, in financial risk assessment, an LLM with RAG might retrieve multiple similar market reports and repeatedly verify the same economic indicators rather than focusing on core risks, leading to delayed decisions. This stems from an imbalance between reasoning and retrieval: after accessing external knowledge, the model can enter a \"self-validation loop,\" repeatedly parsing overlapping or contradictory documents. The generation module, seeking reliability, may trigger further retrievals, creating a feedback loop that worsens inefficiency. This issue is critical in real-time systems like medical diagnosis, where over-retrieval of irrelevant literature can delay urgent decisions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 228, + 294, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 228, + 294, + 371 + ], + "spans": [ + { + "bbox": [ + 53, + 228, + 294, + 371 + ], + "type": "text", + "content": "Case studies show the impact of overthinking [74]. In legal document interpretation, early reasoning errors can amplify through the retrieval-generation loop, causing retrieval along incorrect paths and yielding illogical conclusions. This error propagation is evident in systems like the Search-o1 [51], where flawed information extraction misguides subsequent reasoning. In industrial equipment manual interpretation, overextended reasoning with highly similar documents risks obscuring critical parameter differences, increasing procedural errors. These examples illustrate that overthinking not only hampers knowledge integration but also creates safety hazards in practical applications." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 372, + 306, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 372, + 306, + 538 + ], + "spans": [ + { + "bbox": [ + 53, + 372, + 306, + 538 + ], + "type": "text", + "content": "To mitigate these risks, researchers propose multiple optimization frameworks. ReaRAG [49] limits reasoning chain length and incorporates self-reflection to prune invalid branches. A simple and effective way is to use a two-stage filtering process, first narrowing documents by metadata, then validating fragment relevance, reducing redundant information—for instance, retrieving only relevant legal clauses rather than entire regulatory texts. The DeepSeek R1 [26] applies reinforcement learning with distillation to penalize redundant steps, cutting repeated formula validation in math proofs by over " + }, + { + "bbox": [ + 53, + 372, + 306, + 538 + ], + "type": "inline_equation", + "content": "40\\%" + }, + { + "bbox": [ + 53, + 372, + 306, + 538 + ], + "type": "text", + "content": ". These approaches transform open-ended reasoning into controlled, goal-directed processes, using methods like attention weight analysis to measure information gain or confidence functions to evaluate reasoning paths." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 539, + 294, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 539, + 294, + 693 + ], + "spans": [ + { + "bbox": [ + 53, + 539, + 294, + 693 + ], + "type": "text", + "content": "Current research balances constraints with model creativity. Knowledge graph-guided reasoning is tested in clinical trials to prioritize key medical features over exhaustive literature retrieval [11]. Causal reasoning models aim to break error chains; for example, in financial forecasting, causal graphs restrict reasoning to logically relevant macroeconomic links. 
Adaptive stopping strategies adjust reasoning depth in customer service—simple queries use preset templates, complex issues activate multi-hop reasoning. These advances reshape retrieval-augmented reasoning, with the core challenge being to develop evaluation frameworks that avoid both \"cognitive stagnation\" from excessive constraints and \"cognitive overload\" from insufficient control." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 694, + 313, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 694, + 313, + 718 + ], + "spans": [ + { + "bbox": [ + 53, + 694, + 313, + 718 + ], + "type": "text", + "content": "Future progress will integrate cognitive science with computational modeling. By mimicking human \"intuition-verification\"" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 317, + 72, + 558, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 72, + 558, + 156 + ], + "spans": [ + { + "bbox": [ + 317, + 72, + 558, + 156 + ], + "type": "text", + "content": "decision-making, LLMs could switch seamlessly between rapid response and deep reasoning. In high-risk fields like industrial fault diagnosis, such hybrid models can quickly propose contingency plans after initial retrieval while verifying their validity through deeper analysis. This layered approach reduces overthinking risks and offers a safe, controllable path for applying LLMs in critical industries." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 317, + 167, + 418, + 179 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 167, + 418, + 179 + ], + "spans": [ + { + "bbox": [ + 317, + 167, + 418, + 179 + ], + "type": "text", + "content": "8 Practical Guide" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 317, + 183, + 558, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 183, + 558, + 326 + ], + "spans": [ + { + "bbox": [ + 317, + 183, + 558, + 326 + ], + "type": "text", + "content": "The combination of RAG and Reasoning is not a one-size-fits-all solution; it requires careful evaluation of each scenario's unique needs. As a rapidly evolving and relatively new field, practical applications are still limited, making best practices hard to define. This chapter abstracts and summarizes the key traits of typical RAG+Reasoning application domains and offers practical guidelines for system design based on these features. It provides recommendations on leveraging RAG's strengths with Reasoning, highlighting priorities, pitfalls to avoid, and current opportunities (Figure 10). The goal is to promote wider adoption and effective use of this technology in diverse, complex real-world settings." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 317, + 338, + 444, + 348 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 338, + 444, + 348 + ], + "spans": [ + { + "bbox": [ + 317, + 338, + 444, + 348 + ], + "type": "text", + "content": "8.1 Domain characteristics" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 317, + 352, + 558, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 352, + 558, + 662 + ], + "spans": [ + { + "bbox": [ + 317, + 352, + 558, + 662 + ], + "type": "text", + "content": "As illustrated in the left part of Figure 10, we develop a seven-dimensional feature system based on the three core stages of RAG—query, retrieval, and generation—to systematically analyze challenges and adaptation needs across various industries. The query stage emphasizes the complexity of intent understanding and the demand for advanced reasoning, recognizing that industries differ in query abstraction and specificity; some require quickly capturing implicit, deep intentions, while others need complex reasoning. Effective preservation of original semantic meaning during understanding and reasoning is key to improving RAG performance. Retrieval focuses on the system's adaptability to diverse and dynamic knowledge sources, which vary from rich multi-domain data to rapidly updating information; frequent updates and fragmented knowledge present challenges that demand effective integration to ensure consistent support for generation. The generation stage requires high-quality outputs, with strict control over hallucinations—especially critical in sensitive fields like healthcare and law—along with varying latency requirements for real-time or delayed responses. Explainability and traceability at this stage are essential for system credibility and serve as key evaluation metrics. 
This comprehensive framework reveals technical bottlenecks and guides improvements, and is applied to analyze four representative domains: finance, healthcare, law, and personal assistants." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 317, + 670, + 558, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 670, + 558, + 718 + ], + "spans": [ + { + "bbox": [ + 317, + 670, + 558, + 718 + ], + "type": "text", + "content": "8.1.1 Finance. In the finance domain, user queries typically focus on structured needs like investment decisions and risk forecasting. While intent understanding is moderately complex, the system must perform advanced reasoning amid" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 48, + 232, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 48, + 232, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 48, + 232, + 57 + ], + "type": "text", + "content": "Synergizing RAG and Reasoning: A Systematic Review" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 400, + 48, + 558, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 400, + 48, + 558, + 57 + ], + "spans": [ + { + "bbox": [ + 400, + 48, + 558, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 69, + 558, + 274 + ], + "blocks": [ + { + "bbox": [ + 52, + 69, + 558, + 274 + ], + "lines": [ + { + "bbox": [ + 52, + 69, + 558, + 274 + ], + "spans": [ + { + "bbox": [ + 52, + 69, + 558, + 274 + ], + "type": "image", + "image_path": "7ac1eba9b0f22ec452c189570cac365dfa05f5cba364f27f64c05775b1c82bff.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 177, + 285, + 433, + 298 + ], + "lines": [ + { + "bbox": [ + 177, + 285, + 433, + 298 + ], 
+ "spans": [ + { + "bbox": [ + 177, + 285, + 433, + 298 + ], + "type": "text", + "content": "Figure 10. Practical guide to synergizing RAG and Reasoning" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 314, + 296, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 314, + 296, + 482 + ], + "spans": [ + { + "bbox": [ + 50, + 314, + 296, + 482 + ], + "type": "text", + "content": "rapidly changing market conditions, relying heavily on external knowledge and frequent updates. For example, portfolio return forecasting integrates time series analysis, policy interpretation, and cross-market reasoning. Retrieval demands handling diverse data sources—real-time market data, annual reports, and regulatory filings—with update cycles often measured in minutes. During generation, strict latency and hallucination control are crucial, as outputs must include decision-making suggestions with full data traceability. Investment research reports, for instance, require annotated key indicators, their data sources, and computation logic to ensure transparency and regulatory compliance. High latency control and robust traceability are essential to maintain transparency and adherence to financial regulations." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 491, + 299, + 718 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 50, + 491, + 299, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 491, + 299, + 624 + ], + "spans": [ + { + "bbox": [ + 50, + 491, + 299, + 624 + ], + "type": "text", + "content": "8.1.2 Healthcare. Healthcare queries involve complex medical semantic parsing, often with ambiguous terms or incomplete symptoms. For example, \"persistent chest pain with shortness of breath\" requires multi-hop reasoning across cardiology, pulmonology, and emergency medicine. 
Retrieval must integrate electronic health records, medical imaging, and up-to-date clinical guidelines. In generation, hallucination tolerance is minimal—errors in drug dosages or protocols risk malpractice. Therefore, accuracy, timeliness, and explainability are paramount, with every decision step traceable and verifiable." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 634, + 296, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 634, + 296, + 718 + ], + "spans": [ + { + "bbox": [ + 50, + 634, + 296, + 718 + ], + "type": "text", + "content": "8.1.3 Legal Services. Legal consultations often require interpreting statutes and citing cases, balancing precise legal terms with natural language nuances. Retrieval depends on structured, infrequently updated sources like case law databases and local regulations. Generation demands accuracy—for instance, drafting contract clauses must precisely cite specific statutes (e.g., Article 472 of the Civil Code) down" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 313, + 560, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 313, + 560, + 361 + ], + "spans": [ + { + "bbox": [ + 313, + 313, + 560, + 361 + ], + "type": "text", + "content": "to the paragraph level for traceability. Explainability is essential, with traceability usually above " + }, + { + "bbox": [ + 313, + 313, + 560, + 361 + ], + "type": "inline_equation", + "content": "95\\%" + }, + { + "bbox": [ + 313, + 313, + 560, + 361 + ], + "type": "text", + "content": ", and probabilistic language avoided to comply with strict judicial documentation standards." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 375, + 560, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 375, + 560, + 555 + ], + "spans": [ + { + "bbox": [ + 313, + 375, + 560, + 555 + ], + "type": "text", + "content": "8.1.4 Personal Assistants. 
This domain features diverse, dynamic user needs, including schedule management, real-time navigation, and open-domain conversations. Accurate intent disambiguation through contextual awareness is crucial. Retrieval integrates fragmented sources like user behavior logs, geolocation, and social media. Generation latency varies: weather updates require sub-second responses, while travel planning can tolerate " + }, + { + "bbox": [ + 313, + 375, + 560, + 555 + ], + "type": "inline_equation", + "content": "5+" + }, + { + "bbox": [ + 313, + 375, + 560, + 555 + ], + "type": "text", + "content": " seconds. Hallucination tolerance depends on context—creative outputs are acceptable for recipes but not for flight information, which demands full accuracy. This necessitates adaptive verification in the RAG system. Though intent complexity is lower than in healthcare or legal fields, the domain's interaction diversity requires heavy reliance on external knowledge and dynamic balancing of latency and accuracy." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 571, + 411, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 571, + 411, + 582 + ], + "spans": [ + { + "bbox": [ + 314, + 571, + 411, + 582 + ], + "type": "text", + "content": "8.2 Do's and Don'ts" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 585, + 560, + 719 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 313, + 585, + 560, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 585, + 560, + 645 + ], + "spans": [ + { + "bbox": [ + 313, + 585, + 560, + 645 + ], + "type": "text", + "content": "Building on aforementioned domain characteristics, we further identify six common scenarios, and derive technical adaptation principles for each. This section outlines key optimization strategies (Do's) and prohibitions (Don'ts), to guide the co-design of RAG and reasoning." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 658, + 560, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 658, + 560, + 719 + ], + "spans": [ + { + "bbox": [ + 313, + 658, + 560, + 719 + ], + "type": "text", + "content": "8.2.1 Structured Reasoning Scenarios. For scenarios requiring multi-step logical decomposition and structured knowledge dependency, such as portfolio return prediction, Chain-of-Thought (CoT) task decomposition and knowledge graph (KG)-driven graph reasoning approaches should be" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "spans": [ + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "type": "text", + "content": "Gao et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 72, + 294, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 72, + 294, + 251 + ], + "spans": [ + { + "bbox": [ + 53, + 72, + 294, + 251 + ], + "type": "text", + "content": "employed. Complex problems should be broken into verifiable sub-tasks, such as coupling market trend analysis with policy impact assessment, while leveraging knowledge graph constraints to ensure logical completeness and auditability. It is essential to incorporate a temporal validation layer to cross-check the consistency of timestamp-sensitive information (e.g., real-time market data or emergent regulatory policies) within a dynamic knowledge base. 
Approaches that exclude retrieval-based verification of salient features must be avoided, as they may lead to reasoning biases arising from the absence of structured knowledge anchors (e.g., critical indicators from financial statements). Furthermore, the reasoning space of LLMs should be constrained within domain-specific knowledge frameworks to prevent irrelevant or invalid deductions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 258, + 295, + 508 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 258, + 295, + 508 + ], + "spans": [ + { + "bbox": [ + 53, + 258, + 295, + 508 + ], + "type": "text", + "content": "8.2.2 Dynamic Demand-Responsive Scenarios. For scenarios characterized by rapidly shifting demands and user preference variability, such as itinerary planning and multimodal interaction in personal assistant services, a dynamic adaptation mechanism based on prompt engineering is recommended. By dynamically associating fragmented knowledge units (e.g., user behavior history and real-time traffic updates) with semantic templates and employing heuristic rules for search-space pruning (e.g., prioritizing locally updated information within the past 24 hours), the system can balance contextual adaptability with response speed. Model fine-tuning or reinforcement learning (RLHF/DPO)-based strategy updates should be avoided due to their lengthy iterative cycles and computational overhead, which cannot meet real-time responsiveness requirements, such as millisecond-grade reaction times for last-minute destination changes. Lightweight caching architectures should be implemented within the retrieval system, prioritizing frequently accessed knowledge fragments, such as operating hours of popular tourist attractions, to achieve an equilibrium between dynamism and stability." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 515, + 295, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 515, + 295, + 717 + ], + "spans": [ + { + "bbox": [ + 53, + 515, + 295, + 717 + ], + "type": "text", + "content": "8.2.3 Deterministic Decision-Making Scenarios. In scenarios requiring a single, reliable conclusion, such as clinical diagnosis generation in the healthcare domain, a multi-level deterministic assurance system should be established. Time-validation layers can filter outdated knowledge (e.g., therapies no longer approved), while field-sensitive retrieval modules trigger predefined decision rules conforming to up-to-date clinical guidelines (e.g., those codified within the latest version of the International Classification of Diseases [ICD]). Knowledge graph path constraints should restrict the reasoning process to validated causal links within medical logic (e.g., linking symptom patterns to laboratory test results within corroborated diagnostic pathways), thereby minimizing the likelihood of deviations from standard protocols. Probabilistic exploration strategies that generate alternative hypotheses (e.g., speculative differential diagnoses for atypical pneumonia) should be strictly disallowed to avoid clinical" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 317, + 73, + 558, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 73, + 558, + 120 + ], + "spans": [ + { + "bbox": [ + 317, + 73, + 558, + 120 + ], + "type": "text", + "content": "misjudgments. Additionally, delegating decision-making authority to external classification models must be avoided to maintain end-to-end explainability and a clear causal link in the decision-making pipeline." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 317, + 128, + 558, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 128, + 558, + 354 + ], + "spans": [ + { + "bbox": [ + 317, + 128, + 558, + 354 + ], + "type": "text", + "content": "8.2.4 Time-Sensitive Scenarios. In tasks highly sensitive to response delays, such as real-time risk warnings and trading decisions in the financial sector, heuristic rules should be employed to prioritize indexing of frequently queried knowledge units (e.g., volatility indices and liquidity indicators) at the top of the search hierarchy. Directed retrieval expansion strategies that preload potentially associated information (e.g., contractual clauses of derivative instruments tied to underlying assets) can further reduce latency in multi-turn interactions. Monte Carlo Tree Search (MCTS) and other sample-based algorithms are ill-suited for such scenarios due to the excessive computational complexity caused by branch expansion, rendering them infeasible within tight time constraints (e.g., milliseconds). Similarly, the invocation of complex mathematical solvers (e.g., numerical solutions for stochastic differential equations) can introduce uncontrollable delays and should be replaced with lightweight rule-based mechanisms (e.g., threshold-triggering mechanisms based on historical volatility ranges)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 317, + 364, + 558, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 364, + 558, + 625 + ], + "spans": [ + { + "bbox": [ + 317, + 364, + 558, + 625 + ], + "type": "text", + "content": "8.2.5 Risk-Sensitive Scenarios. For scenarios with minimal tolerance for errors, such as contract clause generation and citation of judicial interpretations in the legal sector, a dual-layer defensive mechanism must be employed. 
A pre-action review layer should validate the compliance of generated content with statutory standards (e.g., ensuring consistency between liability clauses and Article 577 of the Civil Code), while a reliability validation layer performs cross-referencing validation across multiple sources (e.g., aligning Supreme Court precedents with regional court guidelines) to resolve potential conflicts. Retrieval systems must include version control modules to track and update legal references (e.g., automatically flagging repealed local statutes). Unconstrained reinforcement learning-based text generation methods must be avoided, as their exploratory nature risks violating the normative requirements of legal documents (e.g., generating presumptive liability terms unsupported by judicial interpretations). All decision-making actions must pass through deterministic rule engines to filter inadmissible outputs, and the system should never execute decision actions autonomously, such as generating legally binding arbitration notices without oversight." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 317, + 635, + 558, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 635, + 558, + 717 + ], + "spans": [ + { + "bbox": [ + 317, + 635, + 558, + 717 + ], + "type": "text", + "content": "8.2.6 Complex Path Exploration Scenarios. In exploration tasks involving multiple possible trajectories, such as differential diagnosis and therapeutic pathway optimization in medicine, weighted ranking search algorithms should balance search depth and breadth. 
Knowledge graph topology can guide prioritization (e.g., standard treatment procedures for acute coronary syndrome), while Monte Carlo Tree" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 48, + 233, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 48, + 233, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 48, + 233, + 57 + ], + "type": "text", + "content": "Synergizing RAG and Reasoning: A Systematic Review" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 400, + 48, + 558, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 400, + 48, + 558, + 57 + ], + "spans": [ + { + "bbox": [ + 400, + 48, + 558, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 72, + 295, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 72, + 295, + 228 + ], + "spans": [ + { + "bbox": [ + 50, + 72, + 295, + 228 + ], + "type": "text", + "content": "Search can extend exploration into uncommon differential paths (e.g., rare genetic metabolic disorders). Dynamic pruning threshold functions should be designed (e.g., adjusting the scope of differential diagnosis based on patient history) to eliminate low-confidence hypotheses in real time, thereby controlling computational scale. Brute-force searching of all potential paths (e.g., concurrently testing hundreds of pathogens for nonspecific symptoms) should be avoided to prevent exponential computational scaling. Careful handling of specific token triggers during retrieval (e.g., avoiding spurious associations between \"fever\" and unrelated oncological hyperthermia research) is critical to maintaining logical coherence in diagnostic reasoning." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 242, + 165, + 254 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 242, + 165, + 254 + ], + "spans": [ + { + "bbox": [ + 52, + 242, + 165, + 254 + ], + "type": "text", + "content": "8.3 Opportunity Points" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 255, + 295, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 255, + 295, + 340 + ], + "spans": [ + { + "bbox": [ + 50, + 255, + 295, + 340 + ], + "type": "text", + "content": "Based on the Do's and Don'ts of current technologies analyzed in the previous section, there remain numerous directions with substantial academic value and application potential that have yet to be fully explored. This section systematically discusses several promising opportunity points across three dimensions: data and indexing, models and methodologies, and application services." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 351, + 169, + 362 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 351, + 169, + 362 + ], + "spans": [ + { + "bbox": [ + 52, + 351, + 169, + 362 + ], + "type": "text", + "content": "8.3.1 Data and Indexing." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 373, + 295, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 373, + 295, + 491 + ], + "spans": [ + { + "bbox": [ + 50, + 373, + 295, + 491 + ], + "type": "text", + "content": "Cold-Hot Tiered Indexing and Dynamic Context Management. The challenge of managing massive and highly heterogeneous data resources lies in devising an effective cold-hot tiered indexing mechanism that prioritizes data according to their frequency of use and importance. Such a mechanism not only demands classification of data based on timeliness and access frequency but also requires integration with dynamic context management. 
This allows the system to intelligently retrieve the most relevant data according to the immediate context." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 492, + 295, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 492, + 295, + 601 + ], + "spans": [ + { + "bbox": [ + 50, + 492, + 295, + 601 + ], + "type": "text", + "content": "Moreover, a dynamically updated indexing mechanism can mitigate the loss of data timeliness, which often leads to deteriorated inference accuracy. By ensuring access to the most recent and task-appropriate data, this approach reduces redundancy and incorrect retrievals associated with static indexing. When combined with automated task scheduling and resource allocation strategies, fine-grained real-time inference support can be achieved, significantly enhancing the system's overall efficiency." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 610, + 295, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 610, + 295, + 718 + ], + "spans": [ + { + "bbox": [ + 50, + 610, + 295, + 718 + ], + "type": "text", + "content": "Cross-Institution Knowledge Base Construction. The construction of cross-institution or cross-domain knowledge bases offers new opportunities for advancing RAG+Reasoning research. At the core of large-scale cross-institutional knowledge bases lies the optimization of data integration and sharing mechanisms. This entails addressing challenges such as data security and privacy while adopting standardized data interfaces or leveraging federated learning paradigms to enable multidimensional data integration." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 72, + 559, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 559, + 156 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 559, + 156 + ], + "type": "text", + "content": "Through semantic alignment across multiple sources, entity resolution, and concept abstraction, cross-institutional knowledge can be transformed into authoritative and richly contextualized knowledge bases. These enhanced repositories provide robust contextual support for reasoning tasks and can deliver deeper insights in areas such as healthcare, finance, and urban management." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 165, + 567, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 165, + 567, + 273 + ], + "spans": [ + { + "bbox": [ + 313, + 165, + 567, + 273 + ], + "type": "text", + "content": "Fine-Grained Layering and Confidence Grading. In scenarios where retrieval and reasoning operate synchronously, the interpretability and reliability of generated outcomes are paramount. Fine-grained layering of data and indices, along with confidence grading of retrieval results, enables the system to selectively use the most trustworthy and relevant subsets of data during different stages of reasoning. This approach fosters transparency and traceability in final decisions or generative outputs." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 274, + 559, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 274, + 559, + 357 + ], + "spans": [ + { + "bbox": [ + 313, + 274, + 559, + 357 + ], + "type": "text", + "content": "For instance, in medical diagnosis scenarios, confidence grading can initiate additional verification or expert review in high-risk cases. 
In the legal domain, confidence layering systematically presents key evidence and identifies sources of uncertainty, reducing reasoning vulnerabilities and minimizing the risk of erroneous conclusions caused by information ambiguity." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 365, + 471, + 377 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 365, + 471, + 377 + ], + "spans": [ + { + "bbox": [ + 314, + 365, + 471, + 377 + ], + "type": "text", + "content": "8.3.2 Models and Methodologies." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 386, + 564, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 386, + 564, + 542 + ], + "spans": [ + { + "bbox": [ + 313, + 386, + 564, + 542 + ], + "type": "text", + "content": "Event-Driven Active Retrieval. Traditional retrieval mechanisms are predominantly passive. However, event-driven active retrieval presents a promising exploration avenue. By monitoring critical events, such as the injection of new data, user interactions, or changes in external sensors, event-triggered retrieval and reasoning processes can be initiated to capture and respond to potential risks and opportunities in real time. Integrating methodologies such as sequence-based event detection or multitask-learning-based intent recognition can facilitate automatic determination of when and how to trigger retrieval actions. Iteratively optimizing these processes contributes to a more efficient and continuous reasoning loop." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 551, + 563, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 551, + 563, + 658 + ], + "spans": [ + { + "bbox": [ + 313, + 551, + 563, + 658 + ], + "type": "text", + "content": "Spatiotemporal-Aware Retrieval and Association. 
Many applications, such as natural disaster monitoring, traffic flow prediction, and inventory management in retail, exhibit strong dependencies on temporal and spatial dimensions. By incorporating spatiotemporal-aware algorithms, retrieval processes can prioritize or emphasize crucial documents according to constraints tied to time and space. This not only enhances timeliness but also improves the purposefulness and accuracy of reasoning." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 658, + 559, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 658, + 559, + 719 + ], + "spans": [ + { + "bbox": [ + 313, + 658, + 559, + 719 + ], + "type": "text", + "content": "Furthermore, modeling the evolution of events within spatiotemporal dimensions—when combined with semantic indexing and vector-based retrieval mechanisms in RAG—can enable more precise characterization and utilization of complex spatiotemporal dynamics during reasoning." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "spans": [ + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "type": "text", + "content": "Gao et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 72, + 294, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 72, + 294, + 179 + ], + "spans": [ + { + "bbox": [ + 53, + 72, + 294, + 179 + ], + "type": "text", + "content": "Multimodal Fusion in Retrieval and Reasoning. 
Multimodal data (e.g., text, images, audio, video, and sensor data) collectively constitute a richer contextual environment, offering critical cues for reasoning tasks. However, existing studies are often limited to the retrieval of single or a few data modalities. Advancing research on multimodal fusion and reasoning mechanisms under the RAG+Reasoning framework has the potential to greatly enhance the system's capacity for addressing complex queries." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 180, + 294, + 311 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 180, + 294, + 311 + ], + "spans": [ + { + "bbox": [ + 53, + 180, + 294, + 311 + ], + "type": "text", + "content": "The research focus lies in constructing cross-modal representation learning and alignment methods, enabling unified representations of the same entities or events across different modalities. During retrieval, confidence scores for each modality can be integrated into a comprehensive ranking process, culminating in multimodal-informed joint decision-making during reasoning. This approach not only improves contextual understanding in complex tasks but also broadens the application scope of RAG technologies in scenarios such as expert systems and autonomous driving, where sensory integration and interpretation are critical." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 324, + 294, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 324, + 294, + 466 + ], + "spans": [ + { + "bbox": [ + 53, + 324, + 294, + 466 + ], + "type": "text", + "content": "Dynamic Risk Propagation Modeling and Management. The tight coupling of retrieval and reasoning with multi-stage decision-making inevitably introduces risk propagation issues. Misjudgments of high-risk or low-confidence documents during upstream retrieval are often inherited by downstream reasoning processes, amplifying uncertainties and increasing error margins. 
To address this, dynamic risk modeling should be embedded within retrieval workflows, enabling risk quantification, tracking, and management at multiple stages. When necessary, risk mitigation mechanisms or process rollbacks can be triggered, creating a closed-loop correction framework." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 468, + 294, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 468, + 294, + 574 + ], + "spans": [ + { + "bbox": [ + 53, + 468, + 294, + 574 + ], + "type": "text", + "content": "Incorporating strategies for analyzing and managing risk propagation is not only a technical challenge but also a matter of system deployment and standardization. In high-stakes domains such as healthcare and financial risk management, establishing comprehensive safety standards and compliance protocols will be crucial. These protocols should treat dynamic risk propagation management as a critical component of evaluating and iterating knowledge retrieval and reasoning systems." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 588, + 178, + 598 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 588, + 178, + 598 + ], + "spans": [ + { + "bbox": [ + 53, + 588, + 178, + 598 + ], + "type": "text", + "content": "8.3.3 Application Services." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 611, + 294, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 611, + 294, + 717 + ], + "spans": [ + { + "bbox": [ + 53, + 611, + 294, + 717 + ], + "type": "text", + "content": "Validation of Logical Chain Completeness. While RAG with Reasoning can provide partially interpretable reasoning outputs, verifying the completeness of logical chains remains a challenge. Future research could integrate formal verification or symbolic reasoning techniques to ensure consistency and completeness across key reasoning nodes and intermediate conclusions. 
This would prevent logical gaps or illogical leaps in reasoning, offering robust regulatory support for high-stakes industries such as law and finance." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 317, + 72, + 558, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 72, + 558, + 179 + ], + "spans": [ + { + "bbox": [ + 317, + 72, + 558, + 179 + ], + "type": "text", + "content": "Intervenable Generation During Reasoning. Contemporary Agentic RAG often operate as \"black boxes,\" rendering external interventions nearly impossible during generative reasoning tasks. However, providing mechanisms for human intervention—such as through visualization or interactive interfaces—could enable experts or users to perform manual corrections, initialize prior knowledge, or modify interim assumptions during the reasoning process. This would substantially enhance the system's flexibility and safety." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 317, + 180, + 558, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 180, + 558, + 274 + ], + "spans": [ + { + "bbox": [ + 317, + 180, + 558, + 274 + ], + "type": "text", + "content": "Specifically, intervenable generation allows not only post hoc error corrections but also proactive identification and rectification of potential risks or biases at earlier stages. Interactive interpretable reasoning platforms or visualization tools grounded in knowledge graphs could empower users to scrutinize and influence reasoning workflows, thereby enhancing confidence and control in decision-making processes across diverse domains." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 317, + 282, + 558, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 282, + 558, + 399 + ], + "spans": [ + { + "bbox": [ + 317, + 282, + 558, + 399 + ], + "type": "text", + "content": "Risk Decision Interception Firewalls. 
In closed-loop automated tasks such as algorithmic trading or medical diagnostic decision-making, erroneous reasoning outputs can lead to catastrophic outcomes. To mitigate such risks, the system architecture should incorporate risk decision interception firewalls, which perform multidimensional validations at critical reasoning nodes or prior to outputting decisions. When confidence levels or high-risk indicators breach thresholds, these firewalls can block decision outputs or escalate them for stricter human review." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 317, + 401, + 558, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 401, + 558, + 460 + ], + "spans": [ + { + "bbox": [ + 317, + 401, + 558, + 460 + ], + "type": "text", + "content": "This mechanism serves as a \"final line of defense\" for RAG+Reasoning systems, ensuring decision security in large-scale automated information networks. It also provides a robust foundation for compliance and regulatory auditing, enabling safer deployment in critical applications." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 317, + 468, + 558, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 468, + 558, + 562 + ], + "spans": [ + { + "bbox": [ + 317, + 468, + 558, + 562 + ], + "type": "text", + "content": "Edge-Cloud Collaborative Retrieval and Reasoning. With the rapid development of IoT and 5G technologies, many scenarios demand on-site data collection and preliminary processing on edge devices, followed by high-level retrieval and reasoning tasks on cloud platforms. Efficiently partitioning tasks, allocating resources, and maintaining consistency between indexes and models across the edge-cloud continuum represent critical research directions." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 317, + 563, + 558, + 658 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 563, + 558, + 658 + ], + "spans": [ + { + "bbox": [ + 317, + 563, + 558, + 658 + ], + "type": "text", + "content": "Leveraging techniques such as lightweight model compression, distributed index synchronization, and communication optimization can ensure fast reasoning while maximizing resource utilization. Edge-cloud collaborative solutions are particularly impactful for real-time industrial monitoring and smart city applications, reducing network latency and bandwidth bottlenecks while ensuring accurate and timely inference outputs." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 317, + 659, + 558, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 659, + 558, + 716 + ], + "spans": [ + { + "bbox": [ + 317, + 659, + 558, + 716 + ], + "type": "text", + "content": "In summary, RAG+Reasoning systems present many untapped opportunities across various dimensions. Further research and practical validation could greatly improve their use in complex, high-risk scenarios while fueling new growth in GenAI." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "text", + "content": "Synergizing RAG and Reasoning: A Systematic Review" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 400, + 47, + 558, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 400, + 47, + 558, + 57 + ], + "spans": [ + { + "bbox": [ + 400, + 47, + 558, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 72, + 148, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 72, + 148, + 83 + ], + "spans": [ + { + "bbox": [ + 52, + 72, + 148, + 83 + ], + "type": "text", + "content": "9 Future Trends" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 87, + 294, + 123 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 87, + 294, + 123 + ], + "spans": [ + { + "bbox": [ + 52, + 87, + 294, + 123 + ], + "type": "text", + "content": "In this chapter, we summarize four major trends in technological advancements based on current research, aiming to elucidate and guide the potential future directions of RAG." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 52, + 141, + 233, + 153 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 141, + 233, + 153 + ], + "spans": [ + { + "bbox": [ + 52, + 141, + 233, + 153 + ], + "type": "text", + "content": "9.1 The Integration of RAG and Graph" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 156, + 294, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 156, + 294, + 239 + ], + "spans": [ + { + "bbox": [ + 52, + 156, + 294, + 239 + ], + "type": "text", + "content": "Recent developments have witnessed a growing synergy between RAG systems and graph-based approaches. The intrinsic benefits of graph structures, such as explicit logical relationships and knowledge indexing, have enabled new paradigms for addressing challenges in global reasoning, dynamic data management, and personalized services within RAG systems." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 63, + 240, + 178, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 240, + 178, + 251 + ], + "spans": [ + { + "bbox": [ + 63, + 240, + 178, + 251 + ], + "type": "text", + "content": "Knowledge Organization." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 252, + 295, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 252, + 295, + 526 + ], + "spans": [ + { + "bbox": [ + 53, + 252, + 295, + 526 + ], + "type": "text", + "content": "Graph-structured knowledge organization frameworks offer a powerful alternative to traditional vector-based retrieval methods, excelling in modeling complex relationships and supporting global reasoning. For example, GraphRAG [18] combines hierarchical graph indexing with community detection to extract entity relationship networks from text corpora, enabling large-scale thematic analysis through hierarchical summaries. 
Building on this, PIKE [82] introduces a multi-level heterogeneous knowledge graph that organizes documents, semantic segments, and refined knowledge units into a three-layer hierarchy, improving extraction accuracy and multi-hop reasoning via atomized knowledge construction and task decomposition. For dynamic personalization, EMG-RAG [89] features a three-layer Editable Memory Graph architecture that structures memory data by ontology classification, subclass, and entity relationships, using reinforcement learning to enable real-time updates and multidimensional queries. Together, these advances leverage graph topologies to address the limitations of conventional RAG systems—such as one-dimensional representation and weak contextual links—enabling multilevel reasoning from local fact retrieval to global thematic summarization and forming a foundation for interpretable, adaptive RAG systems." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 527, + 295, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 527, + 295, + 693 + ], + "spans": [ + { + "bbox": [ + 53, + 527, + 295, + 693 + ], + "type": "text", + "content": "Symbolic Reasoning. Graph-structured symbolic reasoning methods leverage the multi-hop reasoning power of Knowledge Graphs (KG) to better manage complex semantic and logical relationships. Frameworks like HippoRAG2 and the Think-on-Graph (ToG) [60] series exemplify this. HippoRAG2 [28] builds open knowledge graphs and uses personalized PageRank with a dense-sparse coding approach inspired by brain memory, boosting performance in factual memory, semantic understanding, and multi-hop reasoning. Likewise, ToG-2 combines iterative retrieval of knowledge graphs and documents, using relationship discovery, entity pruning, and context-driven graph searches to integrate fine-grained information from unstructured text, enhancing implicit relationship detection." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 52, + 694, + 294, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 694, + 294, + 718 + ], + "spans": [ + { + "bbox": [ + 52, + 694, + 294, + 718 + ], + "type": "text", + "content": "Task Planning. Graph-based task planning in RAG systems enhances complex problem-solving by overcoming the" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 317, + 72, + 558, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 72, + 558, + 215 + ], + "spans": [ + { + "bbox": [ + 317, + 72, + 558, + 215 + ], + "type": "text", + "content": "limitations of traditional linear workflows, which struggle with multi-step or multimodal reasoning. These approaches build dynamic knowledge graphs, like Mind Maps, to explicitly model logical dependencies and context. For instance, the Agentic Reasoning [92] transforms reasoning chains into graph structures for entity extraction, relation identification, and community clustering, enabling dynamic path tracking and optimized retrieval, excelling in tasks like doctoral-level GPQA [67]. Collaborative frameworks such as Co-STORM extend this to multi-agent scenarios, representing queries, tool calls, and knowledge integration as traversable graph nodes to support task decomposition and adaptive reasoning." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 317, + 217, + 570, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 217, + 570, + 346 + ], + "spans": [ + { + "bbox": [ + 317, + 217, + 570, + 346 + ], + "type": "text", + "content": "Tool Usage and Management. Graph-enhanced approaches to tool management overcome limitations of traditional dependency modeling by effectively capturing complex relationships like parameter passing, functional collaboration, and resource management. 
Graph RAG-Tool Fusion [57] models tools as graph nodes within a dual-layer architecture of core system APIs and domain-specific tools, encoding direct and indirect dependencies as edges. It uses a two-stage retrieval process: vector-based tool retrieval followed by a graph-based depth-first search to assemble dependency-compliant toolsets." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 317, + 357, + 462, + 367 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 357, + 462, + 367 + ], + "spans": [ + { + "bbox": [ + 317, + 357, + 462, + 367 + ], + "type": "text", + "content": "9.2 Multi-Model Collaboration" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 317, + 371, + 572, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 371, + 572, + 598 + ], + "spans": [ + { + "bbox": [ + 317, + 371, + 572, + 598 + ], + "type": "text", + "content": "Multi-model collaboration has emerged as a pivotal strategy for enhancing task complexity handling and domain adaptability in RAG systems [13]. By integrating the strengths of different models, this approach achieves optimized performance. For example, the CR-Planner [52] combines general-purpose generation models (e.g., GPT-4) with domain-specific critic models (e.g., Llama-3-8B). This hybrid system dynamically orchestrates subgoal planning and execution evaluation, utilizing MCTS to generate high-quality training data. Similarly, UAR [14] employs intent-aware and knowledgerequirement classifiers to dynamically trigger retrieval, decoupling lightweight classification tasks from resource-intensive decoding operations of LLMs. Furthermore, Adaptive-RAG [41] deploys small-complexity classifiers to route queries into different levels of processing strategies, balancing response speed for simple queries with deep reasoning for complex ones. 
These strategies form a closed \"generation-evaluation\"loop, leveraging complementary strengths across models to achieve improved accuracy and computational efficiency." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 317, + 608, + 462, + 618 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 608, + 462, + 618 + ], + "spans": [ + { + "bbox": [ + 317, + 608, + 462, + 618 + ], + "type": "text", + "content": "9.3 Multi-Modal Collaboration" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 317, + 622, + 558, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 622, + 558, + 718 + ], + "spans": [ + { + "bbox": [ + 317, + 622, + 558, + 718 + ], + "type": "text", + "content": "The breakthrough in Chain-of-Thought (CoT) capabilities of language models has catalyzed the transition of multimodal reasoning from perceptual-level integration to cognitive-level reasoning, promoting Multimodal Collaborative Reasoning as a key trend [4] By deeply integrating the logical reasoning capabilities of language models with the spatial-semantic representation of multimodal data, it significantly enhances information synthesis in complex scenarios [2]." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "spans": [ + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "type": "text", + "content": "Gao et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 72, + 294, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 72, + 294, + 156 + ], + "spans": [ + { + "bbox": [ + 50, + 72, + 294, + 156 + ], + "type": "text", + "content": "For instance, in the medical domain, multimodal RAG systems such as MedCoT [56] utilize hierarchical expert systems to integrate CT imaging and pathology reports, enabling knowledge graph validation of diagnostic hypotheses and reducing misdiagnosis risks. Future research will likely focus on robust cross-modal knowledge alignment, progressive knowledge distillation, and adaptive reasoning frameworks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 176, + 244, + 188 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 176, + 244, + 188 + ], + "spans": [ + { + "bbox": [ + 51, + 176, + 244, + 188 + ], + "type": "text", + "content": "9.4 Customized Reinforcement Learning" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 190, + 295, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 190, + 295, + 346 + ], + "spans": [ + { + "bbox": [ + 50, + 190, + 295, + 346 + ], + "type": "text", + "content": "The application of reinforcement learning (RL) in RAG systems has become instrumental in improving module coordination and enhancing overall efficiency. Recent studies focus on designing reward mechanisms tailored to the specific needs of RAG systems. Frameworks such as RAG-Gym [96] and DeepRAG [24] model reasoning processes using Markov Decision Processes and introduce fine-grained process supervision mechanisms. Additionally, ReARTeR [49] and SmartRAG [20] incorporate trust-aware reward strategies and end-to-end policy optimization to achieve superior accuracy and robustness. 
Opportunities remain for further exploring automated reward modeling with LLMs to facilitate fine-grained supervision." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 52, + 368, + 140, + 379 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 368, + 140, + 379 + ], + "spans": [ + { + "bbox": [ + 52, + 368, + 140, + 379 + ], + "type": "text", + "content": "10 Conclusion" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 383, + 295, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 383, + 295, + 455 + ], + "spans": [ + { + "bbox": [ + 50, + 383, + 295, + 455 + ], + "type": "text", + "content": "This paper has systematically reviewed the synergistic integration of Retrieval-Augmented Generation (RAG) and reasoning, providing a formal definition of reasoning within the RAG framework as a structured, multi-step, goal-driven process that dynamically combines parametric and retrieved knowledge to address complex problems." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 455, + 297, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 455, + 297, + 526 + ], + "spans": [ + { + "bbox": [ + 50, + 455, + 297, + 526 + ], + "type": "text", + "content": "We presented a comprehensive taxonomy covering the purposes, collaboration paradigms, and implementation methods underlying RAG+Reasoning systems. The synergy enables more precise retrieval informed by logical analysis and enhances reasoning with contextually relevant, up-to-date knowledge beyond parametric limitations." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 527, + 295, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 527, + 295, + 622 + ], + "spans": [ + { + "bbox": [ + 50, + 527, + 295, + 622 + ], + "type": "text", + "content": "While the enhanced reasoning capabilities allow tackling complex knowledge-intensive tasks such as deep research, expert-level problem solving, and domain-specific decision support, practical challenges remain. These include computational and token costs that grow non-linearly, risks of overthinking leading to inefficiency and error propagation, and the lack of evaluation frameworks that effectively assess intermediate reasoning quality alongside final results." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 622, + 295, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 622, + 295, + 694 + ], + "spans": [ + { + "bbox": [ + 50, + 622, + 295, + 694 + ], + "type": "text", + "content": "To bridge the gap from theory to real-world application, we proposed practical design guidelines tailored to diverse domains like finance, healthcare, law, and personal assistants, emphasizing adaptability to heterogeneous, dynamic knowledge sources and strict requirements for output reliability and traceability." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 694, + 295, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 694, + 295, + 719 + ], + "spans": [ + { + "bbox": [ + 51, + 694, + 295, + 719 + ], + "type": "text", + "content": "Finally, we identified promising directions for future research, including graph-structured knowledge integration," + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 72, + 559, + 108 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 559, + 108 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 559, + 108 + ], + "type": "text", + "content": "multimodal and multi-model collaborative reasoning architectures, and advanced reinforcement learning techniques for optimizing retrieval-reasoning workflows." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 108, + 559, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 108, + 559, + 168 + ], + "spans": [ + { + "bbox": [ + 313, + 108, + 559, + 168 + ], + "type": "text", + "content": "Overall, this work establishes both a theoretical foundation and practical roadmap to drive the development of next-generation RAG+Reasoning systems capable of robust, transparent, and efficient cognition, paving the way for impactful applications across academia and industry." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 315, + 183, + 378, + 195 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 183, + 378, + 195 + ], + "spans": [ + { + "bbox": [ + 315, + 183, + 378, + 195 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 320, + 198, + 559, + 718 + ], + "type": "list", + "angle": 0, + "index": 27, + "blocks": [ + { + "bbox": [ + 323, + 198, + 559, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 198, + 559, + 239 + ], + "spans": [ + { + "bbox": [ + 323, + 198, + 559, + 239 + ], + "type": "text", + "content": "[1] Abdelrahman Abdallah, Bhawna Piryani, Jamshid Mozafari, Mohammed Ali, and Adam Jatowt. 2025. Rankify: A comprehensive python toolkit for retrieval, re-ranking, and retrieval-augmented generation. arXiv preprint arXiv:2502.02464 (2025)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 323, + 239, + 559, + 298 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 239, + 559, + 298 + ], + "spans": [ + { + "bbox": [ + 323, + 239, + 559, + 298 + ], + "type": "text", + "content": "[2] Mohammad Mahdi Abootorabi, Amirhosein Zobeiri, Mahdi Dehghani, Mohammadali Mohammadkhani, Bardia Mohammadi, Omid Ghahroodi, Mahdieh Soleymani Baghshah, and Ehsaneddin Asgari. 2025. Ask in Any Modality: A Comprehensive Survey on Multimodal Retrieval-Augmented Generation. arXiv preprint arXiv:2502.08826 (2025)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 323, + 300, + 559, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 323, + 300, + 559, + 339 + ], + "spans": [ + { + "bbox": [ + 323, + 300, + 559, + 339 + ], + "type": "text", + "content": "[3] Akari Asai, Zeqiu Wu, Yizhong Wang, Avirup Sil, and Hannaneh Hajishirzi. 2023. Self-rag: Learning to retrieve, generate, and critique through self-reflection. 
In The Twelfth International Conference on Learning Representations." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 324, + 339, + 559, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 339, + 559, + 379 + ], + "spans": [ + { + "bbox": [ + 324, + 339, + 559, + 379 + ], + "type": "text", + "content": "[4] Jing Bi, Susan Liang, Xiaofei Zhou, Pinxin Liu, Junjia Guo, Yunlong Tang, Luchuan Song, Chao Huang, Guangyu Sun, Jinxi He, et al. 2025. Why Reasoning Matters? A Survey of Advancements in Multimodal Reasoning (v1). arXiv preprint arXiv:2504.03151 (2025)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 324, + 379, + 559, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 379, + 559, + 409 + ], + "spans": [ + { + "bbox": [ + 324, + 379, + 559, + 409 + ], + "type": "text", + "content": "[5] Yuxi Bi, Yunfan Gao, and Haofen Wang. 2025. StePO-Rec: Towards Personalized Outfit Styling Assistant via Knowledge-Guided Multi-Step Reasoning. arXiv preprint arXiv:2504.09915 (2025)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 324, + 409, + 559, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 409, + 559, + 449 + ], + "spans": [ + { + "bbox": [ + 324, + 409, + 559, + 449 + ], + "type": "text", + "content": "[6] Mingyang Chen, Tianpeng Li, Haoze Sun, Yijie Zhou, Chenzheng Zhu, Fan Yang, Zenan Zhou, Weipeng Chen, Haofen Wang, Jeff Z Pan, et al. 2025. Learning to Reason with Search for LLMs via Reinforcement Learning. arXiv preprint arXiv:2503.19470 (2025)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 324, + 449, + 559, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 449, + 559, + 488 + ], + "spans": [ + { + "bbox": [ + 324, + 449, + 559, + 488 + ], + "type": "text", + "content": "[7] Peter Baile Chen, Yi Zhang, Michael Cafarella, and Dan Roth. 2025. Can we Retrieve Everything All at Once? 
ARM: An Alignment-Oriented LLM-based Retrieval Method. arXiv preprint arXiv:2501.18539 (2025)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 324, + 488, + 559, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 488, + 559, + 538 + ], + "spans": [ + { + "bbox": [ + 324, + 488, + 559, + 538 + ], + "type": "text", + "content": "[8] Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiannan Guan, Peng Wang, Mengkang Hu, Yuhang Zhou, Te Gao, and Wangxiang Che. 2025. Towards reasoning era: A survey of long chain-of-thought for reasoning large language models. arXiv preprint arXiv:2503.09567 (2025)." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 324, + 538, + 559, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 538, + 559, + 578 + ], + "spans": [ + { + "bbox": [ + 324, + 538, + 559, + 578 + ], + "type": "text", + "content": "[9] Wenhu Chen, Ming Yin, Max Ku, Pan Lu, Yixin Wan, Xueguang Ma, Jianyu Xu, Xinyi Wang, and Tony Xia. 2023. Theoremqa: A theorem-driven question answering dataset. arXiv preprint arXiv:2305.12524 (2023)." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 320, + 578, + 559, + 618 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 578, + 559, + 618 + ], + "spans": [ + { + "bbox": [ + 320, + 578, + 559, + 618 + ], + "type": "text", + "content": "[10] Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, et al. 2024. Do not think that much for " + }, + { + "bbox": [ + 320, + 578, + 559, + 618 + ], + "type": "inline_equation", + "content": "2 + 3 = ?" + }, + { + "bbox": [ + 320, + 578, + 559, + 618 + ], + "type": "text", + "content": " on the overthinking of o1-like llms. arXiv preprint arXiv:2412.21187 (2024)." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 320, + 618, + 559, + 648 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 618, + 559, + 648 + ], + "spans": [ + { + "bbox": [ + 320, + 618, + 559, + 648 + ], + "type": "text", + "content": "[11] Yixiang Chen, Penglei Sun, Xiang Li, and Xiaowen Chu. 2025. MRD-RAG: Enhancing Medical Diagnosis with Multi-Round Retrieval-Augmented Generation. arXiv preprint arXiv:2504.07724 (2025)." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 320, + 648, + 559, + 688 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 648, + 559, + 688 + ], + "spans": [ + { + "bbox": [ + 320, + 648, + 559, + 688 + ], + "type": "text", + "content": "[12] Yiqun Chen, Lingyong Yan, Weiwei Sun, Xinyu Ma, Yi Zhang, Shuaiqiang Wang, Dawei Yin, Yiming Yang, and Jiaxin Mao. 2025. Improving Retrieval-Augmented Generation through Multi-Agent Reinforcement Learning. arXiv preprint arXiv:2501.15228 (2025)." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 320, + 688, + 559, + 718 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 688, + 559, + 718 + ], + "spans": [ + { + "bbox": [ + 320, + 688, + 559, + 718 + ], + "type": "text", + "content": "[13] Zhijun Chen, Jingzheng Li, Pengpeng Chen, Zhuoran Li, Kai Sun, Yuankai Luo, Qianren Mao, Dingqi Yang, Hailong Sun, and Philip S Yu. 2025. 
Harnessing Multiple Large Language Models: A Survey on" + } + ] + } + ], + "index": 26 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "text", + "content": "Synergizing RAG and Reasoning: A Systematic Review" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 399, + 47, + 558, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 47, + 558, + 57 + ], + "spans": [ + { + "bbox": [ + 399, + 47, + 558, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 30 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 74, + 296, + 712 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 73, + 74, + 251, + 83 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 74, + 251, + 83 + ], + "spans": [ + { + "bbox": [ + 73, + 74, + 251, + 83 + ], + "type": "text", + "content": "LLM Ensemble. arXiv preprint arXiv:2502.18036 (2025)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 84, + 296, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 84, + 296, + 123 + ], + "spans": [ + { + "bbox": [ + 56, + 84, + 296, + 123 + ], + "type": "text", + "content": "[14] Qinyuan Cheng, Xiaonan Li, Shimin Li, Qin Zhu, Zhangyue Yin, Yunfan Shao, Linyang Li, Tianxiang Sun, Hang Yan, and Xipeng Qiu. 2024. Unified active retrieval for retrieval augmented generation. arXiv preprint arXiv:2406.12534 (2024)." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 124, + 296, + 173 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 124, + 296, + 173 + ], + "spans": [ + { + "bbox": [ + 56, + 124, + 296, + 173 + ], + "type": "text", + "content": "[15] Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, et al. 2025. The Danger of Overthinking: Examining the Reasoning-Action Dilemma in Agentic Tasks. arXiv preprint arXiv:2502.08235 (2025)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 174, + 294, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 174, + 294, + 203 + ], + "spans": [ + { + "bbox": [ + 56, + 174, + 294, + 203 + ], + "type": "text", + "content": "[16] Alan Dao and Thinh Le. 2025. ReZero: Enhancing LLM search ability by trying one-more-time. arXiv:2504.11001 [cs.CL] https://arxiv.org/abs/2504.11001" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 204, + 295, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 204, + 295, + 233 + ], + "spans": [ + { + "bbox": [ + 56, + 204, + 295, + 233 + ], + "type": "text", + "content": "[17] Tim Dettmers, Artidoro Pagnoni, Ari Holtzman, and Luke Zettlemoyer. 2023. Qlora: Efficient finetuning of quantized llms. Advances in neural information processing systems 36 (2023), 10088-10115." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 234, + 295, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 234, + 295, + 282 + ], + "spans": [ + { + "bbox": [ + 56, + 234, + 295, + 282 + ], + "type": "text", + "content": "[18] Darren Edge, Ha Trinh, Newman Cheng, Joshua Bradley, Alex Chao, Apurva Mody, Steven Truitt, Dasha Metropolitansky, Robert Oazuwa Ness, and Jonathan Larson. 2024. From local to global: A graph rag approach to query-focused summarization. 
arXiv preprint arXiv:2404.16130 (2024)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 284, + 294, + 313 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 284, + 294, + 313 + ], + "spans": [ + { + "bbox": [ + 56, + 284, + 294, + 313 + ], + "type": "text", + "content": "[19] Chenrui Fan, Ming Li, Lichao Sun, and Tianyi Zhou. 2025. Missing Premise exacerbates Overthinking: Are Reasoning Models losing Critical Thinking Skill? arXiv preprint arXiv:2504.06514 (2025)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 314, + 294, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 314, + 294, + 342 + ], + "spans": [ + { + "bbox": [ + 56, + 314, + 294, + 342 + ], + "type": "text", + "content": "[20] Jingsheng Gao, Linxu Li, Weiyuan Li, Yuzhuo Fu, and Bin Dai. 2024. SmartRAG: Jointly Learn RAG-Related Tasks From the Environment Feedback. arXiv preprint arXiv:2410.18141 (2024)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 343, + 294, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 343, + 294, + 382 + ], + "spans": [ + { + "bbox": [ + 56, + 343, + 294, + 382 + ], + "type": "text", + "content": "[21] Yunfan Gao, Yun Xiong, Xinyu Gao, Kangxiang Jia, Jinliu Pan, Yuxi Bi, Yi Dai, Jiawei Sun, and Haofen Wang. 2023. Retrieval-augmented generation for large language models: A survey. arXiv preprint arXiv:2312.10997 (2023)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 384, + 294, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 384, + 294, + 413 + ], + "spans": [ + { + "bbox": [ + 56, + 384, + 294, + 413 + ], + "type": "text", + "content": "[22] Yunfan Gao, Yun Xiong, Meng Wang, and Haofen Wang. 2024. Modular rag: Transforming rag systems into lego-like reconfigurable frameworks. arXiv preprint arXiv:2407.21059 (2024)." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 414, + 294, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 414, + 294, + 453 + ], + "spans": [ + { + "bbox": [ + 56, + 414, + 294, + 453 + ], + "type": "text", + "content": "[23] Zengyi Gao, Yukun Cao, Hairu Wang, Ao Ke, Yuan Feng, Xike Xie, and S Kevin Zhou. 2025. FRAG: A Flexible Modular Framework for Retrieval-Augmented Generation based on Knowledge Graphs. arXiv preprint arXiv:2501.09957 (2025)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 453, + 294, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 453, + 294, + 492 + ], + "spans": [ + { + "bbox": [ + 56, + 453, + 294, + 492 + ], + "type": "text", + "content": "[24] Xinyan Guan, Jiali Zeng, Fandong Meng, Chunlei Xin, Yaojie Lu, Hongyu Lin, Xianpei Han, Le Sun, and Jie Zhou. 2025. DeepRAG: Thinking to Retrieve Step by Step for Large Language Models. arXiv preprint arXiv:2502.01142 (2025)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 493, + 294, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 493, + 294, + 532 + ], + "spans": [ + { + "bbox": [ + 56, + 493, + 294, + 532 + ], + "type": "text", + "content": "[25] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 533, + 294, + 571 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 533, + 294, + 571 + ], + "spans": [ + { + "bbox": [ + 56, + 533, + 294, + 571 + ], + "type": "text", + "content": "[26] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. 
Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 56, + 572, + 294, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 572, + 294, + 592 + ], + "spans": [ + { + "bbox": [ + 56, + 572, + 294, + 592 + ], + "type": "text", + "content": "[27] Zirui Guo, Lianghao Xia, Yanhua Yu, Tu Ao, and Chao Huang. 2024. Lighthrag: Simple and fast retrieval-augmented generation. (2024)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 56, + 593, + 294, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 593, + 294, + 622 + ], + "spans": [ + { + "bbox": [ + 56, + 593, + 294, + 622 + ], + "type": "text", + "content": "[28] Bernal Jiménez Gutiérrez, Yiheng Shu, Weijian Qi, Sizhe Zhou, and Yu Su. 2025. From RAG to Memory: Non-Parametric Continual Learning for Large Language Models. arXiv preprint arXiv:2502.14802 (2025)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 56, + 623, + 294, + 671 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 623, + 294, + 671 + ], + "spans": [ + { + "bbox": [ + 56, + 623, + 294, + 671 + ], + "type": "text", + "content": "[29] Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, et al. 2024. Olympiadbench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems. arXiv preprint arXiv:2402.14008 (2024)." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 56, + 672, + 294, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 672, + 294, + 712 + ], + "spans": [ + { + "bbox": [ + 56, + 672, + 294, + 712 + ], + "type": "text", + "content": "[30] Yancheng He, Shilong Li, Jiaheng Liu, Weixun Wang, Xingyuan Bu, Ge Zhang, Zhongyuan Peng, Zhaoxiang Zhang, Zhicheng Zheng, Wenbo Su, et al. 2025. Can Large Language Models Detect Errors in Long Chain-of-Thought Reasoning? arXiv preprint arXiv:2502.19361" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 320, + 75, + 559, + 701 + ], + "type": "list", + "angle": 0, + "index": 38, + "blocks": [ + { + "bbox": [ + 336, + 75, + 359, + 83 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 336, + 75, + 359, + 83 + ], + "spans": [ + { + "bbox": [ + 336, + 75, + 359, + 83 + ], + "type": "text", + "content": "(2025)." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 321, + 84, + 558, + 114 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 321, + 84, + 558, + 114 + ], + "spans": [ + { + "bbox": [ + 321, + 84, + 558, + 114 + ], + "type": "text", + "content": "[31] Xanh Ho, Anh-Khoa Duong Nguyen, Saku Sugawara, and Akiko Aizawa. 2020. Constructing a multi-hop qa dataset for comprehensive evaluation of reasoning steps. arXiv preprint arXiv:2011.01060 (2020)." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 320, + 114, + 558, + 143 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 114, + 558, + 143 + ], + "spans": [ + { + "bbox": [ + 320, + 114, + 558, + 143 + ], + "type": "text", + "content": "[32] Yubin Hong, Chaofan Li, Jingyi Zhang, and Yingxia Shao. 2025. FG-RAG: Enhancing Query-Focused Summarization with Context-Aware Fine-Grained Graph RAG. arXiv preprint arXiv:2504.07103 (2025)." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 320, + 144, + 559, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 144, + 559, + 193 + ], + "spans": [ + { + "bbox": [ + 320, + 144, + 559, + 193 + ], + "type": "text", + "content": "[33] SU Hongjin, Howard Yen, Mengzhou Xia, Weijia Shi, Niklas Muennighoff, Han-yu Wang, Liu Haisu, Quan Shi, Zachary S Siegel, Michael Tang, et al. 2024. BRIGHT: A Realistic and Challenging Benchmark for Reasoning-Intensive Retrieval. In The Thirteenth International Conference on Learning Representations." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 320, + 194, + 558, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 194, + 558, + 223 + ], + "spans": [ + { + "bbox": [ + 320, + 194, + 558, + 223 + ], + "type": "text", + "content": "[34] Sheryl Hsu, Omar Khattab, Chelsea Finn, and Archit Sharma. 2024. Grounding by trying: Llms with reinforcement learning-enhanced retrieval. arXiv preprint arXiv:2410.23214 (2024)." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 320, + 224, + 558, + 253 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 224, + 558, + 253 + ], + "spans": [ + { + "bbox": [ + 320, + 224, + 558, + 253 + ], + "type": "text", + "content": "[35] Jian Hu. 2025. REINFORCE++: A Simple and Efficient Approach for Aligning Large Language Models. arXiv preprint arXiv:2501.03262 (2025)." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 320, + 254, + 558, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 254, + 558, + 282 + ], + "spans": [ + { + "bbox": [ + 320, + 254, + 558, + 282 + ], + "type": "text", + "content": "[36] Yunhai Hu, Yilun Zhao, Chen Zhao, and Arman Cohan. 2025. MCTS-RAG: Enhancing Retrieval-Augmented Generation with Monte Carlo Tree Search. arXiv preprint arXiv:2503.20757 (2025)." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 320, + 284, + 559, + 323 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 284, + 559, + 323 + ], + "spans": [ + { + "bbox": [ + 320, + 284, + 559, + 323 + ], + "type": "text", + "content": "[37] Fantine Huot, Reinald Kim Amplayo, Jennimaria Palomaki, Alice Shoshana Jakobovits, Elizabeth Clark, and Mirella Lapata. 2024. Agents' Room: Narrative Generation through Multi-step Collaboration. arXiv preprint arXiv:2410.02603 (2024)." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 320, + 323, + 558, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 323, + 558, + 363 + ], + "spans": [ + { + "bbox": [ + 320, + 323, + 558, + 363 + ], + "type": "text", + "content": "[38] Shayekh Bin Islam, Md Asib Rahman, KSM Hossain, Enamul Hoque, Shafiq Joty, and Md Rizwan Parvez. 2024. Open-rag: Enhanced retrieval-augmented reasoning with open-source large language models. arXiv preprint arXiv:2410.01782 (2024)." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 320, + 363, + 558, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 363, + 558, + 402 + ], + "spans": [ + { + "bbox": [ + 320, + 363, + 558, + 402 + ], + "type": "text", + "content": "[39] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. 2024. Openai o1 system card. arXiv preprint arXiv:2412.16720 (2024)." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 320, + 403, + 558, + 452 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 403, + 558, + 452 + ], + "spans": [ + { + "bbox": [ + 320, + 403, + 558, + 452 + ], + "type": "text", + "content": "[40] Naman Jain, King Han, Alex Gu, Wen-Ding Li, Fanjia Yan, Tianjun Zhang, Sida Wang, Armando Solar-Lezama, Koushik Sen, and Ion Stoica. 2024. 
Livecodebench: Holistic and contamination free evaluation of large language models for code. arXiv preprint arXiv:2403.07974 (2024)." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 320, + 453, + 558, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 453, + 558, + 492 + ], + "spans": [ + { + "bbox": [ + 320, + 453, + 558, + 492 + ], + "type": "text", + "content": "[41] Soyeong Jeong, Jinheon Baek, Sukmin Cho, Sung Ju Hwang, and Jong C Park. 2024. Adaptive-rag: Learning to adapt retrieval-augmented large language models through question complexity arXiv preprint arXiv:2403.14403 (2024)." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 320, + 493, + 558, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 493, + 558, + 521 + ], + "spans": [ + { + "bbox": [ + 320, + 493, + 558, + 521 + ], + "type": "text", + "content": "[42] Pengcheng Jiang. 2025. DeepRetrieval: Powerful Query Generation for Information Retrieval with Reinforcement Learning. arXiv preprint arXiv:2503.00223 (2025)." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 320, + 522, + 558, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 522, + 558, + 562 + ], + "spans": [ + { + "bbox": [ + 320, + 522, + 558, + 562 + ], + "type": "text", + "content": "[43] Yucheng Jiang, Yijia Shao, Dekun Ma, Sina J Semnani, and Monica S Lam. 2024. Into the unknown unknowns: Engaged human learning through participation in language model agent conversations. arXiv preprint arXiv:2408.15232 (2024)." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 320, + 563, + 558, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 563, + 558, + 601 + ], + "spans": [ + { + "bbox": [ + 320, + 563, + 558, + 601 + ], + "type": "text", + "content": "[44] Yucheng Jiang, Yijia Shao, Dekun Ma, Sina J Semnani, and Monica S Lam. 2024. 
Into the unknown unknowns: Engaged human learning through participation in language model agent conversations. arXiv preprint arXiv:2408.15232 (2024)." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 320, + 602, + 558, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 602, + 558, + 651 + ], + "spans": [ + { + "bbox": [ + 320, + 602, + 558, + 651 + ], + "type": "text", + "content": "[45] Zhengbao Jiang, Frank F Xu, Luyu Gao, Zhiqing Sun, Qian Liu, Jane Dwivedi-Yu, Yiming Yang, Jamie Callan, and Graham Neubig. 2023 Active retrieval augmented generation. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing 7969-7992." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 320, + 651, + 559, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 651, + 559, + 701 + ], + "spans": [ + { + "bbox": [ + 320, + 651, + 559, + 701 + ], + "type": "text", + "content": "[46] Ashutosh Joshi, Sheikh Muhammad Sarwar, Samarth Varshney, Sreyashi Nag, Shrivats Agrawal, and Juhi Naik. 2024. REAPER: Reasoning based retrieval planning for complex RAG systems. In Proceedings of the 33rd ACM International Conference on Information and Knowledge Management. 4621-4628." + } + ] + } + ], + "index": 37 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "spans": [ + { + "bbox": [ + 525, + 47, + 558, + 56 + ], + "type": "text", + "content": "Gao et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 73, + 296, + 712 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 56, + 73, + 296, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 73, + 296, + 123 + ], + "spans": [ + { + "bbox": [ + 56, + 73, + 296, + 123 + ], + "type": "text", + "content": "[47] Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, et al. 2019. Natural questions: a benchmark for question answering research. Transactions of the Association for Computational Linguistics 7 (2019), 453-466." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 124, + 296, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 124, + 296, + 183 + ], + "spans": [ + { + "bbox": [ + 55, + 124, + 296, + 183 + ], + "type": "text", + "content": "[48] Myeonghwa Lee, Seonho An, and Min-Soo Kim. 2024. PlanRAG: A plan-then-retrieval augmented generation for generative large language models as decision makers. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers). 6537–6555." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 184, + 296, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 184, + 296, + 232 + ], + "spans": [ + { + "bbox": [ + 56, + 184, + 296, + 232 + ], + "type": "text", + "content": "[49] Zhicheng Lee, Shulin Cao, Jinxin Liu, Jiajie Zhang, Weichuan Liu, Xiaoyin Che, Lei Hou, and Juanzi Li. 2025. ReaRAG: Knowledge-guided Reasoning Enhances Factuality of Large Reasoning Models with Iterative Retrieval Augmented Generation. arXiv preprint arXiv:2503.21729 (2025)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 233, + 295, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 233, + 295, + 263 + ], + "spans": [ + { + "bbox": [ + 56, + 233, + 295, + 263 + ], + "type": "text", + "content": "[50] Jinzheng Li, Jingshu Zhang, Hongguang Li, and Yiqing Shen. 2024. An Agent Framework for Real-Time Financial Information Searching with Large Language Models. arXiv preprint arXiv:2502.15684 (2024)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 264, + 295, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 264, + 295, + 303 + ], + "spans": [ + { + "bbox": [ + 56, + 264, + 295, + 303 + ], + "type": "text", + "content": "[51] Xiaoxi Li, Guanting Dong, Jiajie Jin, Yuyao Zhang, Yujia Zhou, Yutao Zhu, Peitian Zhang, and Zhicheng Dou. 2025. Search-01: Agentic search-enhanced large reasoning models. arXiv preprint arXiv:2501.05366 (2025)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 303, + 295, + 343 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 303, + 295, + 343 + ], + "spans": [ + { + "bbox": [ + 56, + 303, + 295, + 343 + ], + "type": "text", + "content": "[52] Xingxuan Li, Weiwen Xu, Ruochen Zhao, Fangkai Jiao, Shafiq Joty, and Lidong Bing. 2024. Can We Further Elicit Reasoning in LLMs? Critic-Guided Planning with Retrieval-Augmentation for Solving Challenging Tasks. arXiv preprint arXiv:2410.01428 (2024)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 344, + 295, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 344, + 295, + 384 + ], + "spans": [ + { + "bbox": [ + 56, + 344, + 295, + 384 + ], + "type": "text", + "content": "[53] Xingxuan Li, Weiwen Xu, Ruochen Zhao, Fangkai Jiao, Shafiq Joty, and Lidong Bing. 2024. Can We Further Elicit Reasoning in LLMs? Critic-Guided Planning with Retrieval-Augmentation for Solving Challenging Tasks. 
arXiv preprint arXiv:2410.01428 (2024)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 384, + 296, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 384, + 296, + 433 + ], + "spans": [ + { + "bbox": [ + 56, + 384, + 296, + 433 + ], + "type": "text", + "content": "[54] Zhuoqun Li, Haiyang Yu, Xuanang Chen, Hongyu Lin, Yaojie Lu, Fei Huang, Xianpei Han, Yongbin Li, and Le Sun. 2025. Deepsolution: Boosting complex engineering solution design via tree-based exploration and bi-point thinking. arXiv preprint arXiv:2502.20730 (2025)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 433, + 295, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 433, + 295, + 472 + ], + "spans": [ + { + "bbox": [ + 56, + 433, + 295, + 472 + ], + "type": "text", + "content": "[55] Hunter Lightman, Vineet Kosaraju, Yuri Burda, Harrison Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. 2023. Let's verify step by step. In *The Twelfth International Conference on Learning Representations*." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 473, + 295, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 473, + 295, + 502 + ], + "spans": [ + { + "bbox": [ + 56, + 473, + 295, + 502 + ], + "type": "text", + "content": "[56] Jiaxiang Liu, Yuan Wang, Jiawei Du, Joey Tianyi Zhou, and Zuozhu Liu. 2024. Medcot: Medical chain of thought via hierarchical expert. arXiv preprint arXiv:2412.13736 (2024)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 503, + 295, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 503, + 295, + 532 + ], + "spans": [ + { + "bbox": [ + 56, + 503, + 295, + 532 + ], + "type": "text", + "content": "[57] Elias Lumer, Pradeep Honaganahalli Basavaraju, Myles Mason, James A Burke, and Vamse Kumar Subbiah. 2025. Graph RAG-Tool Fusion. 
arXiv preprint arXiv:2502.07223 (2025)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 533, + 295, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 533, + 295, + 572 + ], + "spans": [ + { + "bbox": [ + 56, + 533, + 295, + 572 + ], + "type": "text", + "content": "[58] Haoran Luo, Yikai Guo, Qika Lin, Xiaobao Wu, Xinyu Mu, Wenhao Liu, Meina Song, Yifan Zhu, Luu Anh Tuan, et al. 2025. KBQA-o1: Agentic Knowledge Base Question Answering with Monte Carlo Tree Search. arXiv preprint arXiv:2501.18922 (2025)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 572, + 295, + 621 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 572, + 295, + 621 + ], + "spans": [ + { + "bbox": [ + 56, + 572, + 295, + 621 + ], + "type": "text", + "content": "[59] Yuanjie Lyu, Zhiyu Li, Simin Niu, Feiyu Xiong, Bo Tang, Wenjin Wang, Hao Wu, Huanyong Liu, Tong Xu, and Enhong Chen. 2025. Crud-rag: A comprehensive chinese benchmark for retrieval-augmented generation of large language models. ACM Transactions on Information Systems 43, 2 (2025), 1-32." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 622, + 295, + 671 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 622, + 295, + 671 + ], + "spans": [ + { + "bbox": [ + 56, + 622, + 295, + 671 + ], + "type": "text", + "content": "[60] Shengjie Ma, Chengjin Xu, Xuhui Jiang, Muzhi Li, Huaren Qu, Cehao Yang, Jiaxin Mao, and Jian Guo. 2024. Think-on-Graph 2.0: Deep and Faithful Large Language Model Reasoning with Knowledge-guided Retrieval Augmented Generation. arXiv preprint arXiv:2407.10805 (2024)." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 56, + 672, + 295, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 672, + 295, + 712 + ], + "spans": [ + { + "bbox": [ + 56, + 672, + 295, + 712 + ], + "type": "text", + "content": "[61] Xinbei Ma, Yeyun Gong, Pengcheng He, Hai Zhao, and Nan Duan. 2023. Query rewriting in retrieval-augmented large language models. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing. 5303-5315." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 320, + 73, + 559, + 700 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 320, + 73, + 559, + 112 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 73, + 559, + 112 + ], + "spans": [ + { + "bbox": [ + 320, + 73, + 559, + 112 + ], + "type": "text", + "content": "[62] Grégoire Mialon, Clémentine Fourrier, Thomas Wolf, Yann LeCun, and Thomas Scialom. 2023. Gaia: a benchmark for general ai assistants. In The Twelfth International Conference on Learning Representations." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 320, + 114, + 559, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 114, + 559, + 152 + ], + "spans": [ + { + "bbox": [ + 320, + 114, + 559, + 152 + ], + "type": "text", + "content": "[63] Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. 2025. s1: Simple test-time scaling. arXiv preprint arXiv:2501.19393 (2025)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 320, + 154, + 559, + 192 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 154, + 559, + 192 + ], + "spans": [ + { + "bbox": [ + 320, + 154, + 559, + 192 + ], + "type": "text", + "content": "[64] Shishir G Patil, Tianjun Zhang, Xin Wang, and Joseph E Gonzalez. 
2024. Gorilla: Large language model connected with massive apis. Advances in Neural Information Processing Systems 37 (2024), 126544-126565." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 320, + 194, + 559, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 194, + 559, + 233 + ], + "spans": [ + { + "bbox": [ + 320, + 194, + 559, + 233 + ], + "type": "text", + "content": "[65] Fabio Petroni, Aleksandra Piktus, Angela Fan, Patrick Lewis, Majid Yazdani, Nicola De Cao, James Thorne, Yacine Jernite, Vladimir Karpukhin, Jean Maillard, et al. 2020. KILT: a benchmark for knowledge intensive language tasks. arXiv preprint arXiv:2009.02252 (2020)." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 320, + 234, + 558, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 234, + 558, + 262 + ], + "spans": [ + { + "bbox": [ + 320, + 234, + 558, + 262 + ], + "type": "text", + "content": "[66] Pouya Pezeshkpour and Estevam Hruschka. 2025. Insight-RAG: Enhancing LLMs with Insight-Driven Augmentation. arXiv preprint arXiv:2504.00187 (2025)." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 320, + 264, + 558, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 264, + 558, + 303 + ], + "spans": [ + { + "bbox": [ + 320, + 264, + 558, + 303 + ], + "type": "text", + "content": "[67] David Rein, Betty Li Hou, Asa Cooper Stickland, Jackson Petty, Richard Yuanzhe Pang, Julien Dirani, Julian Michael, and Samuel R Bowman. 2024. Gpqa: A graduate-level google-proof q&a benchmark. In First Conference on Language Modeling." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 320, + 303, + 558, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 303, + 558, + 342 + ], + "spans": [ + { + "bbox": [ + 320, + 303, + 558, + 342 + ], + "type": "text", + "content": "[68] Zhihong Shao, Yeyun Gong, Yelong Shen, Minlie Huang, Nan Duan, and Weizhu Chen. 2023. 
Enhancing retrieval-augmented large language models with iterative retrieval-generation synergy. arXiv preprint arXiv:2305.15294 (2023)." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 320, + 343, + 558, + 383 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 343, + 558, + 383 + ], + "spans": [ + { + "bbox": [ + 320, + 343, + 558, + 383 + ], + "type": "text", + "content": "[69] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300 (2024)." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 320, + 384, + 558, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 384, + 558, + 412 + ], + "spans": [ + { + "bbox": [ + 320, + 384, + 558, + 412 + ], + "type": "text", + "content": "[70] Quan Shi, Michael Tang, Karthik Narasimhan, and Shunyu Yao. 2024. Can Language Models Solve Olympiad Programming? arXiv preprint arXiv:2404.10952 (2024)." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 320, + 413, + 558, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 413, + 558, + 442 + ], + "spans": [ + { + "bbox": [ + 320, + 413, + 558, + 442 + ], + "type": "text", + "content": "[71] Quan Shi, Michael Tang, Karthik Narasimhan, and Shunyu Yao. 2024. Can Language Models Solve Olympiad Programming? arXiv preprint arXiv:2404.10952 (2024)." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 320, + 443, + 558, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 443, + 558, + 482 + ], + "spans": [ + { + "bbox": [ + 320, + 443, + 558, + 482 + ], + "type": "text", + "content": "[72] Huatong Song, Jinhao Jiang, Yingqian Min, Jie Chen, Zhipeng Chen, Wayne Xin Zhao, Lei Fang, and Ji-Rong Wen. 2025. 
R1-Searcher: Incentivizing the Search Capability in LLMs via Reinforcement Learning. arXiv preprint arXiv:2503.05592 (2025)." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 320, + 483, + 558, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 483, + 558, + 521 + ], + "spans": [ + { + "bbox": [ + 320, + 483, + 558, + 521 + ], + "type": "text", + "content": "[73] Sakhinana Sagar Srinivas and Venkataramana Runkana. 2025. Scaling Test-Time Inference with Policy-Optimized, Dynamic Retrieval-Augmented Generation via KV Caching and Decoding. arXiv preprint arXiv:2504.01281 (2025)." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 320, + 522, + 558, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 522, + 558, + 563 + ], + "spans": [ + { + "bbox": [ + 320, + 522, + 558, + 563 + ], + "type": "text", + "content": "[74] Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Hanjie Chen, Xia Hu, et al. 2025. Stop overthinking: A survey on efficient reasoning for large language models. arXiv preprint arXiv:2503.16419 (2025)." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 320, + 563, + 558, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 563, + 558, + 602 + ], + "spans": [ + { + "bbox": [ + 320, + 563, + 558, + 602 + ], + "type": "text", + "content": "[75] Zhongxiang Sun, Qipeng Wang, Weijie Yu, Xiaoxue Zang, Kai Zheng, Jun Xu, Xiao Zhang, Song Yang, and Han Li. 2025. ReARTeR: Retrieval-Augmented Reasoning with Trustworthy Process Rewarding. arXiv preprint arXiv:2501.07861 (2025)." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 320, + 602, + 558, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 602, + 558, + 632 + ], + "spans": [ + { + "bbox": [ + 320, + 602, + 558, + 632 + ], + "type": "text", + "content": "[76] Alon Talmor and Jonathan Berant. 2018. 
The web as a knowledge-base for answering complex questions. arXiv preprint arXiv:1803.06643 (2018)." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 320, + 632, + 558, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 632, + 558, + 662 + ], + "spans": [ + { + "bbox": [ + 320, + 632, + 558, + 662 + ], + "type": "text", + "content": "[77] Hieu Tran, Zonghai Yao, Junda Wang, Yifan Zhang, Zhichao Yang, and Hong Yu. 2024. RARE: Retrieval-Augmented Reasoning Enhancement for Large Language Models. arXiv preprint arXiv:2412.02830 (2024)." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 320, + 662, + 558, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 662, + 558, + 700 + ], + "spans": [ + { + "bbox": [ + 320, + 662, + 558, + 700 + ], + "type": "text", + "content": "[78] Harsh Trivedi, Niranjan Balasubramanian, Tushar Khot, and Ashish Sabharwal. 2022. Interleaving retrieval with chain-of-thought reasoning for knowledge-intensive multi-step questions. arXiv preprint arXiv:2212.10509 (2022)." 
+ } + ] + } + ], + "index": 34 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "text", + "content": "Synergizing RAG and Reasoning: A Systematic Review" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 399, + 47, + 558, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 47, + 558, + 57 + ], + "spans": [ + { + "bbox": [ + 399, + 47, + 558, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 73, + 296, + 700 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 56, + 73, + 296, + 113 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 73, + 296, + 113 + ], + "spans": [ + { + "bbox": [ + 56, + 73, + 296, + 113 + ], + "type": "text", + "content": "[79] Harsh Trivedi, Niranjan Balasubramanian, Tushar Khot, and Ashish Sabharwal. 2022. MuSiQue: Multihop Questions via Single-hop Question Composition. Transactions of the Association for Computational Linguistics 10 (2022), 539-554." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 114, + 296, + 153 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 114, + 296, + 153 + ], + "spans": [ + { + "bbox": [ + 55, + 114, + 296, + 153 + ], + "type": "text", + "content": "[80] Tu Vu, Mohit Iyyer, Xuezhi Wang, Noah Constant, Jerry Wei, Jason Wei, Chris Tar, Yun-Hsuan Sung, Denny Zhou, Quoc Le, et al. 2023. Freshllms: Refreshing large language models with search engine augmentation. arXiv preprint arXiv:2310.03214 (2023)." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 154, + 294, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 154, + 294, + 194 + ], + "spans": [ + { + "bbox": [ + 56, + 154, + 294, + 194 + ], + "type": "text", + "content": "[81] Ante Wang, Linfeng Song, Ye Tian, Dian Yu, Haitao Mi, Xiangyu Duan, Zhaopeng Tu, Jinsong Su, and Dong Yu. 2025. Don't Get Lost in the Trees: Streamlining LLM Reasoning by Overcoming Tree Search Exploration Pitfalls. arXiv preprint arXiv:2502.11183 (2025)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 194, + 295, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 194, + 295, + 223 + ], + "spans": [ + { + "bbox": [ + 56, + 194, + 295, + 223 + ], + "type": "text", + "content": "[82] Jinyu Wang, Jingjing Fu, Rui Wang, Lei Song, and Jiang Bian. 2025. PIKE-RAG: sPecialized Knowledge and Rationale Augmented Generation. arXiv preprint arXiv:2501.11551 (2025)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 224, + 295, + 253 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 224, + 295, + 253 + ], + "spans": [ + { + "bbox": [ + 56, + 224, + 295, + 253 + ], + "type": "text", + "content": "[83] Liang Wang, Haonan Chen, Nan Yang, Xiaolong Huang, Zhicheng Dou, and Furu Wei. 2025. Chain-of-Retrieval Augmented Generation. arXiv preprint arXiv:2501.14342 (2025)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 254, + 295, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 254, + 295, + 302 + ], + "spans": [ + { + "bbox": [ + 56, + 254, + 295, + 302 + ], + "type": "text", + "content": "[84] Ruobing Wang, Daren Zha, Shi Yu, Qingfei Zhao, Yuxuan Chen, Yixuan Wang, Shuo Wang, Yukun Yan, Zhenghao Liu, Xu Han, et al. 2024. Retriever-and-Memory: Towards Adaptive Note-Enhanced Retrieval-Augmented Generation. arXiv preprint arXiv:2410.08821 (2024)." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 303, + 295, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 303, + 295, + 342 + ], + "spans": [ + { + "bbox": [ + 56, + 303, + 295, + 342 + ], + "type": "text", + "content": "[85] Siqi Wang, Chao Liang, Yunfan Gao, Yang Liu, Jing Li, and Haofen Wang. 2024. Decoding Urban Industrial Complexity: Enhancing Knowledge-Driven Insights via IndustryScopeGPT. In Proceedings of the 32nd ACM International Conference on Multimedia. 4757-4765." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 343, + 295, + 383 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 343, + 295, + 383 + ], + "spans": [ + { + "bbox": [ + 56, + 343, + 295, + 383 + ], + "type": "text", + "content": "[86] Shuting Wang, Jiongnan Liu, Shiren Song, Jiehan Cheng, Yuqi Fu, Peidong Guo, Kun Fang, Yutao Zhu, and Zhicheng Dou. 2024. Domainrag: A chinese benchmark for evaluating domain-specific retrieval-augmented generation. arXiv preprint arXiv:2406.05654 (2024)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 384, + 295, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 384, + 295, + 422 + ], + "spans": [ + { + "bbox": [ + 56, + 384, + 295, + 422 + ], + "type": "text", + "content": "[87] Xidong Wang, Guiming Hardy Chen, Dingjie Song, Zhiyi Zhang, Zhihong Chen, Qingying Xiao, Feng Jiang, Jianquan Li, Xiang Wan, Benyou Wang, et al. 2023. Cmb: A comprehensive medical benchmark in chinese. arXiv preprint arXiv:2308.08833 (2023)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 423, + 295, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 423, + 295, + 462 + ], + "spans": [ + { + "bbox": [ + 56, + 423, + 295, + 462 + ], + "type": "text", + "content": "[88] Xiaohua Wang, Zhenghua Wang, Xuan Gao, Feiran Zhang, Yixin Wu, Zhibo Xu, Tianyuan Shi, Zhengyuan Wang, Shizheng Li, Qi Qian, et al. 2024. 
Searching for best practices in retrieval-augmented generation. arXiv preprint arXiv:2407.01219 (2024)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 463, + 295, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 463, + 295, + 502 + ], + "spans": [ + { + "bbox": [ + 56, + 463, + 295, + 502 + ], + "type": "text", + "content": "[89] Zheng Wang, Zhongyang Li, Zeren Jiang, Dandan Tu, and Wei Shi. 2024. Crafting Personalized Agents through Retrieval-Augmented Generation on Editable Memory Graphs. arXiv preprint arXiv:2409.19401 (2024)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 502, + 295, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 502, + 295, + 542 + ], + "spans": [ + { + "bbox": [ + 56, + 502, + 295, + 542 + ], + "type": "text", + "content": "[90] Zhengren Wang, Jiayang Yu, Dongsheng Ma, Zhe Chen, Yu Wang, Zhiyu Li, Feiyu Xiong, Yanfeng Wang, Linpeng Tang, Wentao Zhang, et al. 2025. RARE: Retrieval-Augmented Reasoning Modeling. arXiv preprint arXiv:2503.23513 (2025)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 543, + 295, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 543, + 295, + 582 + ], + "spans": [ + { + "bbox": [ + 56, + 543, + 295, + 582 + ], + "type": "text", + "content": "[91] Yixuan Weng, Minjun Zhu, Guangsheng Bao, Hongbo Zhang, Jindong Wang, Yue Zhang, and Linyi Yang. 2024. Cyclereresearcher: Improving automated research via automated review. arXiv preprint arXiv:2411.00816 (2024)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 582, + 295, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 582, + 295, + 611 + ], + "spans": [ + { + "bbox": [ + 56, + 582, + 295, + 611 + ], + "type": "text", + "content": "[92] Junde Wu, Jiayuan Zhu, and Yuyuan Liu. 2025. Agentic Reasoning: Reasoning LLMs with Tools for the Deep Research. 
arXiv preprint arXiv:2502.04644 (2025)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 56, + 612, + 295, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 612, + 295, + 651 + ], + "spans": [ + { + "bbox": [ + 56, + 612, + 295, + 651 + ], + "type": "text", + "content": "[93] Wenjie Wu, Yongcheng Jing, Yingjie Wang, Wenbin Hu, and Dacheng Tao. 2025. Graph-augmented reasoning: Evolving step-by-step knowledge graph retrieval for llm reasoning. arXiv preprint arXiv:2503.01642 (2025)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 56, + 652, + 295, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 652, + 295, + 700 + ], + "spans": [ + { + "bbox": [ + 56, + 652, + 295, + 700 + ], + "type": "text", + "content": "[94] Zekun Xi, Wenbiao Yin, Jizhan Fang, Jialong Wu, Runnan Fang, Ningyu Zhang, Jiang Yong, Pengjun Xie, Fei Huang, and Huajun Chen. 2025. OmniThink: Expanding Knowledge Boundaries in Machine Writing through Thinking. arXiv preprint arXiv:2501.09751 (2025)." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 317, + 73, + 558, + 681 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 320, + 73, + 558, + 113 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 73, + 558, + 113 + ], + "spans": [ + { + "bbox": [ + 320, + 73, + 558, + 113 + ], + "type": "text", + "content": "[95] Liang Xiao, Wen Dai, Shuai Chen, Bin Qin, Chongyang Shi, Haopeng Jing, and Tianyu Guo. 2025. Retrieval-Augmented Generation by Evidence Retroactivity in LLMs. arXiv preprint arXiv:2501.05475 (2025)." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 320, + 114, + 558, + 152 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 114, + 558, + 152 + ], + "spans": [ + { + "bbox": [ + 320, + 114, + 558, + 152 + ], + "type": "text", + "content": "[96] Guangzhi Xiong, Qiao Jin, Xiao Wang, Yin Fang, Haolin Liu, Yifan Yang, Fangyuan Chen, Zhixing Song, Dengyu Wang, Minjia Zhang, et al. 2025. Rag-gym: Optimizing reasoning and search agents with process supervision. arXiv preprint arXiv:2502.13957 (2025)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 320, + 153, + 558, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 153, + 558, + 183 + ], + "spans": [ + { + "bbox": [ + 320, + 153, + 558, + 183 + ], + "type": "text", + "content": "[97] Guanming Xiong, Haochen Li, and Wen Zhao. 2025. MCTS-KBQA: Monte Carlo Tree Search for Knowledge Base Question Answering. arXiv preprint arXiv:2502.13428 (2025)." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 320, + 184, + 558, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 184, + 558, + 223 + ], + "spans": [ + { + "bbox": [ + 320, + 184, + 558, + 223 + ], + "type": "text", + "content": "[98] Ruibin Xiong, Yimeng Chen, Dmitrii Khizbullin, and Jürgen Schmidhuber. 2025. Beyond Outlining: Heterogeneous Recursive Planning for Adaptive Long-form Writing with Language Models. arXiv preprint arXiv:2503.08275 (2025)." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 320, + 224, + 558, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 224, + 558, + 272 + ], + "spans": [ + { + "bbox": [ + 320, + 224, + 558, + 272 + ], + "type": "text", + "content": "[99] Fengli Xu, Qianyue Hao, Zefang Zong, Jingwei Wang, Yunke Zhang, Jingyi Wang, Xiaochong Lan, Jiahui Gong, Tianjian Ouyang, Fanjin Meng, et al. 2025. Towards Large Reasoning Models: A Survey of Reinforced Reasoning with Large Language Models. 
arXiv preprint arXiv:2501.09686 (2025)." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 273, + 558, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 273, + 558, + 322 + ], + "spans": [ + { + "bbox": [ + 317, + 273, + 558, + 322 + ], + "type": "text", + "content": "[100] Zhipeng Xu, Zhenghao Liu, Yukun Yan, Shuo Wang, Shi Yu, Zheni Zeng, Chaojun Xiao, Zhiyuan Liu, Ge Yu, and Chenyan Xiong. 2024. ActiveRAG: Autonomous Knowledge Assimilation and Accommodation through Retrieval-Augmented Agents. arXiv preprint arXiv:2402.13547 (2024)." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 323, + 558, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 323, + 558, + 342 + ], + "spans": [ + { + "bbox": [ + 317, + 323, + 558, + 342 + ], + "type": "text", + "content": "[101] Ruiran Yan, Zheng Liu, and Defu Lian. 2025. O1 embedder: Let retrievers think before action. arXiv preprint arXiv:2502.07555 (2025)." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 343, + 558, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 343, + 558, + 382 + ], + "spans": [ + { + "bbox": [ + 317, + 343, + 558, + 382 + ], + "type": "text", + "content": "[102] Xiaoming Zhang, Ming Wang, Xiaocui Yang, Daling Wang, Shi Feng, and Yifei Zhang. 2024. Hierarchical Retrieval-Augmented Generation Model with Rethink for Multi-hop Question Answering. arXiv preprint arXiv:2408.11875 (2024)." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 383, + 558, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 383, + 558, + 422 + ], + "spans": [ + { + "bbox": [ + 317, + 383, + 558, + 422 + ], + "type": "text", + "content": "[103] Zhuocheng Zhang, Yang Feng, and Min Zhang. 2025. LevelRAG: Enhancing Retrieval-Augmented Generation with Multi-hop Logic Planning over Rewriting Augmented Searchers. arXiv preprint arXiv:2502.18139 (2025)." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 422, + 558, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 422, + 558, + 453 + ], + "spans": [ + { + "bbox": [ + 317, + 422, + 558, + 453 + ], + "type": "text", + "content": "[104] Bowen Zhao, Zander Brumbaugh, Yizhong Wang, Hannaneh Hajishirzi, and Noah A Smith. 2024. Set the clock: Temporal alignment of pretrained language models. arXiv preprint arXiv:2402.16797 (2024)." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 453, + 558, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 453, + 558, + 491 + ], + "spans": [ + { + "bbox": [ + 317, + 453, + 558, + 491 + ], + "type": "text", + "content": "[105] Xuejiao Zhao, Siyan Liu, Su-Yin Yang, and Chunyan Miao. 2025. MedRAG: Enhancing Retrieval-augmented Generation with Knowledge Graph-Elicited Reasoning for Healthcare Copilot. arXiv preprint arXiv:2502.04413 (2025)." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 492, + 558, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 492, + 558, + 532 + ], + "spans": [ + { + "bbox": [ + 317, + 492, + 558, + 532 + ], + "type": "text", + "content": "[106] Yuxiang Zheng, Dayuan Fu, Xiangkun Hu, Xiaojie Cai, Lyumanshan Ye, Pengrui Lu, and Pengfei Liu. 2025. DeepResearcher: Scaling Deep Research via Reinforcement Learning in Real-world Environments. arXiv preprint arXiv:2504.03160 (2025)." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 317, + 533, + 558, + 571 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 533, + 558, + 571 + ], + "spans": [ + { + "bbox": [ + 317, + 533, + 558, + 571 + ], + "type": "text", + "content": "[107] Yijie Zhong, Feifan Wu, Mengying Guo, Xiaolian Zhang, Meng Wang, and Haofen Wang. 2025. Meta-PKE: Memory-Enhanced Task-Adaptive Personal Knowledge Extraction in Daily Life. Information Processing & Management 62, 4 (2025), 104097." 
+ } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 317, + 572, + 558, + 601 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 572, + 558, + 601 + ], + "spans": [ + { + "bbox": [ + 317, + 572, + 558, + 601 + ], + "type": "text", + "content": "[108] Yujia Zhou, Zheng Liu, Jiajie Jin, Jian-Yun Nie, and Zhicheng Dou. 2024. Metacognitive retrieval-augmented large language models. In Proceedings of the ACM Web Conference 2024. 1453-1463." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 317, + 602, + 558, + 641 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 602, + 558, + 641 + ], + "spans": [ + { + "bbox": [ + 317, + 602, + 558, + 641 + ], + "type": "text", + "content": "[109] Jiachen Zhu, Congmin Zheng, Jianghao Lin, Kounianhua Du, Ying Wen, Yong Yu, Jun Wang, and Weinan Zhang. 2025. Retrieval-Augmented Process Reward Model for Generalizable Mathematical Reasoning. arXiv preprint arXiv:2502.14361 (2025)." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 317, + 642, + 558, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 642, + 558, + 681 + ], + "spans": [ + { + "bbox": [ + 317, + 642, + 558, + 681 + ], + "type": "text", + "content": "[110] Rongzhi Zhu, Xiangyu Liu, Zequn Sun, Yiwei Wang, and Wei Hu. 2025. Mitigating Lost-in-Retrieval Problems in Retrieval Augmented Multi-Hop Question Answering. arXiv preprint arXiv:2502.14245 (2025)." 
+ } + ] + } + ], + "index": 34 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "spans": [ + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "type": "text", + "content": "Gao et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 72, + 108, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 72, + 108, + 85 + ], + "spans": [ + { + "bbox": [ + 51, + 72, + 108, + 85 + ], + "type": "text", + "content": "Appendix" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 87, + 232, + 99 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 87, + 232, + 99 + ], + "spans": [ + { + "bbox": [ + 51, + 87, + 232, + 99 + ], + "type": "text", + "content": "Agentic RAG Symbol Reference System" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 102, + 294, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 102, + 294, + 139 + ], + "spans": [ + { + "bbox": [ + 50, + 102, + 294, + 139 + ], + "type": "text", + "content": "The following table presents a complete symbol reference system with formally defined mathematical notations for all core concepts." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 147, + 170, + 159 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 147, + 170, + 159 + ], + "spans": [ + { + "bbox": [ + 51, + 147, + 170, + 159 + ], + "type": "text", + "content": "Symbol Design Hierarchy" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 162, + 294, + 210 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 67, + 162, + 251, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 162, + 251, + 174 + ], + "spans": [ + { + "bbox": [ + 67, + 162, + 251, + 174 + ], + "type": "text", + "content": "- Base states/actions: Standard font " + }, + { + "bbox": [ + 67, + 162, + 251, + 174 + ], + "type": "inline_equation", + "content": "(S_{t},a_{t})" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 174, + 235, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 174, + 235, + 186 + ], + "spans": [ + { + "bbox": [ + 67, + 174, + 235, + 186 + ], + "type": "text", + "content": "- Sets/spaces: Calligraphic font " + }, + { + "bbox": [ + 67, + 174, + 235, + 186 + ], + "type": "inline_equation", + "content": "(\\mathcal{A},\\mathcal{K}_t)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 186, + 294, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 186, + 294, + 198 + ], + "spans": [ + { + "bbox": [ + 67, + 186, + 294, + 198 + ], + "type": "text", + "content": "- Core mechanism functions: Uppercase Greek " + }, + { + "bbox": [ + 67, + 186, + 294, + 198 + ], + "type": "inline_equation", + "content": "(\\Psi, \\Gamma)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 198, + 281, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 198, + 281, + 210 + ], + "spans": [ + { + "bbox": [ + 67, + 198, + 281, + 210 + ], + "type": "text", + "content": "- Operational functions: Calligraphic font " + }, + { + "bbox": [ 
+ 67, + 198, + 281, + 210 + ], + "type": "inline_equation", + "content": "(\\mathcal{R},\\mathcal{T}_a)" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 331, + 72, + 560, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 72, + 560, + 96 + ], + "spans": [ + { + "bbox": [ + 331, + 72, + 560, + 96 + ], + "type": "text", + "content": "- Auxiliary functions: Lowercase Greek " + }, + { + "bbox": [ + 331, + 72, + 560, + 96 + ], + "type": "inline_equation", + "content": "(\\delta, \\phi)" + }, + { + "bbox": [ + 331, + 72, + 560, + 96 + ], + "type": "text", + "content": " or blackboard bold (I)" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 315, + 106, + 422, + 116 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 106, + 422, + 116 + ], + "spans": [ + { + "bbox": [ + 315, + 106, + 422, + 116 + ], + "type": "text", + "content": "Annotation Guidelines" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 331, + 121, + 451, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 121, + 451, + 132 + ], + "spans": [ + { + "bbox": [ + 331, + 121, + 451, + 132 + ], + "type": "text", + "content": "- Symbol disambiguation:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 339, + 133, + 558, + 168 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 339, + 133, + 556, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 133, + 556, + 144 + ], + "spans": [ + { + "bbox": [ + 339, + 133, + 556, + 144 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 339, + 133, + 556, + 144 + ], + "type": "inline_equation", + "content": "\\mathcal{R}" + }, + { + "bbox": [ + 339, + 133, + 556, + 144 + ], + "type": "text", + "content": " strictly denotes retrieval function (vs. 
reward " + }, + { + "bbox": [ + 339, + 133, + 556, + 144 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 339, + 133, + 556, + 144 + ], + "type": "text", + "content": ")" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 339, + 144, + 558, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 144, + 558, + 168 + ], + "spans": [ + { + "bbox": [ + 339, + 144, + 558, + 168 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 339, + 144, + 558, + 168 + ], + "type": "inline_equation", + "content": "\\delta" + }, + { + "bbox": [ + 339, + 144, + 558, + 168 + ], + "type": "text", + "content": " exclusively represents state transitions (vs. branch selector " + }, + { + "bbox": [ + 339, + 144, + 558, + 168 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 339, + 144, + 558, + 168 + ], + "type": "text", + "content": ")" + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 331, + 169, + 436, + 180 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 169, + 436, + 180 + ], + "spans": [ + { + "bbox": [ + 331, + 169, + 436, + 180 + ], + "type": "text", + "content": "- Dynamic extensions:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 339, + 180, + 558, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 339, + 180, + 558, + 205 + ], + "spans": [ + { + "bbox": [ + 339, + 180, + 558, + 205 + ], + "type": "text", + "content": "- Action space " + }, + { + "bbox": [ + 339, + 180, + 558, + 205 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 339, + 180, + 558, + 205 + ], + "type": "text", + "content": " and knowledge base " + }, + { + "bbox": [ + 339, + 180, + 558, + 205 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_t" + }, + { + "bbox": [ + 339, + 180, + 558, + 205 + ], + "type": "text", + "content": " support incremental updates: " + }, + { + "bbox": [ + 339, 
+ 180, + 558, + 205 + ], + "type": "inline_equation", + "content": "\\mathcal{K}_{t + 1} = \\mathcal{K}_t\\oplus \\mathrm{Retrieve}(q_t)" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 233, + 57 + ], + "type": "text", + "content": "Synergizing RAG and Reasoning: A Systematic Review" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 399, + 47, + 559, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 47, + 559, + 57 + ], + "spans": [ + { + "bbox": [ + 399, + 47, + 559, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 34 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 101, + 93, + 509, + 163 + ], + "blocks": [ + { + "bbox": [ + 211, + 71, + 399, + 83 + ], + "lines": [ + { + "bbox": [ + 211, + 71, + 399, + 83 + ], + "spans": [ + { + "bbox": [ + 211, + 71, + 399, + 83 + ], + "type": "text", + "content": "Table 3. Basic states and system components" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 101, + 93, + 509, + 163 + ], + "lines": [ + { + "bbox": [ + 101, + 93, + 509, + 163 + ], + "spans": [ + { + "bbox": [ + 101, + 93, + 509, + 163 + ], + "type": "table", + "html": "
SymbolTypeDefinition & Description
St=(Ht,Ct)Composite stateComplete system state at timestep t, containing historical information and context vectors
HtVector/SetHistorical information aggregation
CtVectorContextual embedding vectors
qtVectorVector representation of current query at step t
KtSetDynamic knowledge base ( Initialized as K0=∅)
", + "image_path": "ea63eb4b161da6fe9a953b4d2d131a0946aee8e4aa2d5f88da7c9c7c4c90820f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 102, + 193, + 509, + 242 + ], + "blocks": [ + { + "bbox": [ + 212, + 169, + 398, + 182 + ], + "lines": [ + { + "bbox": [ + 212, + 169, + 398, + 182 + ], + "spans": [ + { + "bbox": [ + 212, + 169, + 398, + 182 + ], + "type": "text", + "content": "Table 4. Action space and policy definitions" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 102, + 193, + 509, + 242 + ], + "lines": [ + { + "bbox": [ + 102, + 193, + 509, + 242 + ], + "spans": [ + { + "bbox": [ + 102, + 193, + 509, + 242 + ], + "type": "table", + "html": "
SymbolTypeDefinition & Description
ASetAction space, e.g., A = {Retrieve, Generate, Verify, Terminate}
atScalarSelected action at timestep t (at ∈ A)
π(St; Θ)FunctionPolicy function with parameters Θ, mapping states to action probability distributions (π: S → Δ(A))
", + "image_path": "3b93db755ec01b5c451d164e3cc6cc14ab28488e52aa26dae925d402e660ea21.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 100, + 271, + 507, + 342 + ], + "blocks": [ + { + "bbox": [ + 227, + 248, + 382, + 258 + ], + "lines": [ + { + "bbox": [ + 227, + 248, + 382, + 258 + ], + "spans": [ + { + "bbox": [ + 227, + 248, + 382, + 258 + ], + "type": "text", + "content": "Table 5. State transition mechanisms" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 100, + 271, + 507, + 342 + ], + "lines": [ + { + "bbox": [ + 100, + 271, + 507, + 342 + ], + "spans": [ + { + "bbox": [ + 100, + 271, + 507, + 342 + ], + "type": "table", + "html": "
SymbolTypeDefinition & Description
δFunctionState transition function, update rule St+1 = δ(St, ·)
TaFunctionLow-level state transition operation for action a (e.g., TRetrieve denotes retrieval)
RFunctionRetrieval function, R(St) returns retrieval results
OperatorFunction composition operator (e.g., f∘g(x) = f(g(x)))
", + "image_path": "1d848e6509a2fa6c7cfdf8b5bb7b9054f778a70c266ccc426354134081ddf86d.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 126, + 372, + 484, + 450 + ], + "blocks": [ + { + "bbox": [ + 203, + 349, + 406, + 361 + ], + "lines": [ + { + "bbox": [ + 203, + 349, + 406, + 361 + ], + "spans": [ + { + "bbox": [ + 203, + 349, + 406, + 361 + ], + "type": "text", + "content": "Table 6. Feedback and optimization components" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 126, + 372, + 484, + 450 + ], + "lines": [ + { + "bbox": [ + 126, + 372, + 484, + 450 + ], + "spans": [ + { + "bbox": [ + 126, + 372, + 484, + 450 + ], + "type": "table", + "html": "
SymbolTypeDefinition & Description
R(St, at, St+1)FunctionReward function, outputs reward value rt
I(·)FunctionIndicator function (returns 1 if condition holds, else 0)
∇θJ(θ)OperatorPolicy gradient for optimizing policy parameters Θ
γScalarDiscount factor for cumulative reward calculation
", + "image_path": "4d1bcbeaa0e278e95266fc849a9e6ebfbf2b494143d1d43c264d056f0d8e6ecf.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 102, + 480, + 509, + 562 + ], + "blocks": [ + { + "bbox": [ + 227, + 456, + 383, + 469 + ], + "lines": [ + { + "bbox": [ + 227, + 456, + 383, + 469 + ], + "spans": [ + { + "bbox": [ + 227, + 456, + 383, + 469 + ], + "type": "text", + "content": "Table 7. Submodule-specific symbols" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 102, + 480, + 509, + 562 + ], + "lines": [ + { + "bbox": [ + 102, + 480, + 509, + 562 + ], + "spans": [ + { + "bbox": [ + 102, + 480, + 509, + 562 + ], + "type": "table", + "html": "
SymbolTypeDefinition & Description
ΨFunctionReasoning function, generates intermediate reasoning results
ΓFunctionDecision function, produces final outputs (e.g., answers)
ψ(·)FunctionBranch selector for reflective reasoning path selection
φ(·)FunctionConfidence mapping function (evaluations to scalar confidence)
τScalarDecision threshold for triggering specific operations (e.g., verification/termination)
", + "image_path": "d1f3e9f6c15da1afab15a3d990fdc1295647b11a1816d189b65c2a07934958e2.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "spans": [ + { + "bbox": [ + 52, + 47, + 212, + 57 + ], + "type": "text", + "content": "Conference'17, July 2017, Washington, DC, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "spans": [ + { + "bbox": [ + 525, + 47, + 558, + 57 + ], + "type": "text", + "content": "Gao et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 35 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_20xxx/2504.20064/3484c7dd-5472-4f2c-ab8a-d4410cc59cb3_content_list.json b/data/2025/2504_20xxx/2504.20064/3484c7dd-5472-4f2c-ab8a-d4410cc59cb3_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..0ed3e106cba80c5a2d5a18dc11220b53f2eea7e3 --- /dev/null +++ b/data/2025/2504_20xxx/2504.20064/3484c7dd-5472-4f2c-ab8a-d4410cc59cb3_content_list.json @@ -0,0 +1,1371 @@ +[ + { + "type": "text", + "text": "Against Opacity: Explainable AI and Large Language Models for Effective Digital Advertising", + "text_level": 1, + "bbox": [ + 86, + 99, + 910, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Qi Yang", + "bbox": [ + 197, + 162, + 264, + 179 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "yangqi@itmo.ru", + "bbox": [ + 174, + 180, + 285, + 194 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ITMO University", + "bbox": [ + 173, + 195, + 289, + 208 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Saint Petersburg, Russia", + "bbox": [ + 148, 
+ 210, + 312, + 224 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Marlo Ongpin", + "bbox": [ + 441, + 162, + 557, + 179 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "marlo@somin.ai", + "bbox": [ + 441, + 180, + 555, + 193 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "SoMin.ai Research", + "bbox": [ + 437, + 195, + 560, + 207 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Singapore, Singapore", + "bbox": [ + 426, + 210, + 571, + 224 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Sergey Nikolenko", + "bbox": [ + 694, + 162, + 839, + 179 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "sergey@logic.pdmi.ras.ru", + "bbox": [ + 681, + 180, + 852, + 194 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ITMO University", + "bbox": [ + 709, + 195, + 826, + 209 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Steklov Institute of Mathematics", + "bbox": [ + 658, + 210, + 875, + 223 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Saint Petersburg, Russia", + "bbox": [ + 686, + 224, + 848, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Alfred Huang", + "bbox": [ + 307, + 251, + 421, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "alfred@somin.ai", + "bbox": [ + 307, + 268, + 421, + 282 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "SoMin.ai Research", + "bbox": [ + 302, + 284, + 426, + 296 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Singapore, Singapore", + "bbox": [ + 292, + 299, + 436, + 313 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Aleksandr Farseev", + "bbox": [ + 557, + 251, + 707, + 266 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "sasha@somin.ai", + "bbox": [ + 576, + 268, + 687, + 281 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "SoMin.ai Research", + "bbox": [ + 568, + 284, + 694, + 296 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Singapore, 
Singapore", + "bbox": [ + 560, + 299, + 704, + 313 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a4bbad9354d0c96540ac46d8b2ae9e19f85349fb003e3dde95f5c9f708a55630.jpg", + "image_caption": [ + "Figure 1: Overview of the LLM-based advertising analysis framework SODA (Section 3)." + ], + "image_footnote": [], + "bbox": [ + 124, + 329, + 870, + 481 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 83, + 517, + 183, + 530 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The opaqueness of modern digital advertising, exemplified by platforms such as Meta Ads, raises concerns regarding their autonomous control over audience targeting, pricing structures, and ad relevancy assessments. Locked in their leading positions by network effects, \"Metas and Googles of the world\" attract countless advertisers who rely on intuition, with billions of dollars lost on ineffective social media ads. The platforms' algorithms use huge amounts of data unavailable to advertisers, and the algorithms themselves are opaque as well. This lack of transparency hinders the advertisers' ability to make informed decisions and necessitates efforts to promote transparency, standardize industry metrics, and strengthen regulatory frameworks. In this work, we propose novel ways to assist marketers in optimizing their advertising strategies via machine learning techniques designed to analyze and evaluate content, in particular, predict the click-through rates (CTR) of novel advertising content. 
Another important problem is that large volumes of data available in the competitive landscape, e.g., competitors' ads,", + "bbox": [ + 81, + 535, + 483, + 771 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.", + "bbox": [ + 81, + 780, + 482, + 852 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada", + "bbox": [ + 84, + 853, + 357, + 863 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "© 2023 Copyright held by the owner/author(s). Publication rights licensed to ACM.", + "bbox": [ + 84, + 864, + 472, + 875 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ACM ISBN 979-8-4007-0108-5/23/10...$15.00", + "bbox": [ + 84, + 875, + 294, + 883 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://doi.org/10.1145/3581783.3612817", + "bbox": [ + 84, + 883, + 272, + 895 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "impede the ability of marketers to derive meaningful insights. This leads to a pressing need for a novel approach that would allow us to summarize and comprehend complex data. 
Inspired by the success of ChatGPT in bridging the gap between large language models (LLMs) and a broader non-technical audience, we propose a novel system that facilitates marketers in data interpretation, called SODA, that merges LLMs with explainable AI, enabling better human-AI collaboration with an emphasis on the domain of digital marketing and advertising. By combining LLMs and explainability features, in particular modern text-image models, we aim to improve the synergy between human marketers and AI systems.", + "bbox": [ + 511, + 518, + 915, + 671 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "CCS CONCEPTS", + "text_level": 1, + "bbox": [ + 514, + 681, + 651, + 696 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "- Information systems $\\rightarrow$ Learning to rank; Multimedia and multimodal retrieval; Computational advertising; Multimedia and multimodal retrieval; Computational advertising.", + "bbox": [ + 513, + 700, + 915, + 743 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "KEYWORDS", + "text_level": 1, + "bbox": [ + 514, + 755, + 620, + 768 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Digital Advertising, Ads Performance Prediction, Deep Learning, Large Language Model, Explainable AI", + "bbox": [ + 513, + 773, + 913, + 801 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ACM Reference Format:", + "text_level": 1, + "bbox": [ + 514, + 806, + 661, + 818 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Qi Yang, Marlo Ongpin, Sergey Nikolenko, Alfred Huang, and Aleksandr Farseev. 2023. Against Opacity: Explanable AI and Large Language Models for Effective Digital Advertising. In Proceedings of the 31st ACM International Conference on Multimedia (MM '23), October 29-November 3, 2023, Ottawa, ON, Canada. ACM, New York, NY, USA, 7 pages. 
https://doi.org/10.1145/3581783.3612817", + "bbox": [ + 513, + 819, + 913, + 893 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.20064v1 [cs.IR] 22 Apr 2025", + "bbox": [ + 22, + 272, + 60, + 708 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 84, + 104, + 256, + 119 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The online advertising industry is the poster child of data science. Google and Facebook became industry-dominating behemoths to a large extent because they excelled at crunching the numbers and showing the best online ads to their primary assets, user audiences, while Amazon did the same for item recommendations in its online store. In academia, the Netflix Prize Competition [2] devoted to movie recommendations was one of the first open competitions with serious prizes and organization, a pioneer that would eventually lead to Kaggle and innumerable open leaderboards that nowadays track the state of the art in virtually every measurable ML task. The Netflix Prize itself has led to significant breakthroughs in collaborative filtering, and its dataset is still used as one of the standard benchmarks [2]. One definitely cannot say that the field of recommender systems, in particular online advertising, lacks the attention of machine learning researchers, and many important advances keep being made every month [6, 11, 18, 29, 33, 36-38].", + "bbox": [ + 81, + 125, + 480, + 345 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, most advances are being made on the side of the platforms (ad marketplaces) such as Facebook (Meta) [19], Google [23], Alibaba [28], or Taobao [17], and therefore they are not accessible to the advertising platform users, i.e., digital marketers. 
Collaborative filtering datasets are understandably private, and marketing professionals that create advertising content do not have access to the data needed to predict their own future performance. Note that these predictions are often self-fulfilling: if, e.g., Meta models predict low click-through ratio (CTR) for your ad, Meta will charge you more for showing it, probably show it less, and the campaign will likely be a failure regardless of how accurate the CTR prediction has been [1]. Often, there is no practical way to control the cost of advertising; technically, if a platform decided to charge more money for an ad nothing could prevent them from doing so.", + "bbox": [ + 81, + 345, + 482, + 539 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Moreover, even if marketing professionals could run the corresponding models, that would only be of modest help with their job, which is content creation. Suppose that a model tells you that your new ad is a bad match for your audience, and the expected CTR is low. How do you fix that? It cannot be a pure collaborative filtering model since it has to predict CTR for a new ad that has not been shown to users yet, but it is still an opaque model that maps your ad content into a latent representation via \"giant inscrutable matrices\". So all you can do even if you have such a model is to try and make a different ad, get a new prediction, and work via trial and error.", + "bbox": [ + 81, + 539, + 482, + 676 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "One potential way to address this issue involves visualizing the decision-making process of a neural network, providing marketers with insights into the rationale behind specific predictions made by AI models [3, 15, 20, 39]. Therefore, our first contribution in this work is a new variation of a state-of-the-art CTR prediction model coupled with a mechanism for analyzing the ad images (banners) via an image attention mechanism. 
The results provide human-understandable analysis that can be turned into actionable insights.", + "bbox": [ + 81, + 678, + 482, + 787 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, this is only the beginning. Individual ad analysis via explainable ML models has proven beneficial in scenarios such as individual content evaluation prior to starting an advertising campaign, but it is much less practical when applied to large volumes of images and text ads in real-world settings. The time constraints faced by marketers impede their ability to effectively process and extract key content traits in their own advertising practices.", + "bbox": [ + 81, + 787, + 482, + 885 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In our opinion, the long-awaited revolution in digital advertising and content marketing will occur when both the ads themselves and the results of opaque models can be explained in ways that are both understandable for humans and actionable in terms of business results. We believe that the time for this revolution is now, and in this work we show that large language models such as GPT-3.5 [27] and GPT-4 [26] are already increasingly able to explain the \"reasoning\" behind recommender models and provide aggregate insights about advertising campaigns consisting of hundreds of individual ads. 
Prior to LLMs, approaches to aggregate text corpora in the context of recommender systems had been proposed via topic modeling [22, 25], sometimes coupled with deep learning [35] and user profiling [5, 34], but topic modeling is based on the bag-of-words assumption and cannot summarize text as an LLM does; visual understanding of ads had also been explored with convolutional networks [32].", + "bbox": [ + 511, + 107, + 913, + 327 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Therefore, our main contribution is that we present preliminary results for a road-map that could achieve this holy grail of content marketing: provide explainable, actionable insights into advertising content along with possible strategies for improvement with models that could work on the side of a small advertising agency rather than a huge platform. We begin with direct CTR prediction and then proceed to provide explainable insights and content recommendations with large language models and even visual generative AI (see Fig. 1).", + "bbox": [ + 511, + 328, + 913, + 452 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The paper is organized as follows: in Section 2, we present an improved model for CTR prediction and visualization procedures for advertising banners, Section 3 introduces our approach to explainable ad analysis with large language models, Section 4 shows the results of a case study that confirms the effectiveness of our approach, and Section 5 concludes the paper.", + "bbox": [ + 511, + 453, + 913, + 536 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 EXPLAINING OPAQUE AI WITH AI: CTR PREDICTION AND VISUALIZATIONS", + "text_level": 1, + "bbox": [ + 513, + 556, + 883, + 585 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The lack of transparency within the advertising sector has been widely acknowledged as a primary reason for the inefficient allocation of advertising budgets. 
Notably, the responsibility for determining the cost per 1,000 impressions (CPM) and selecting competing entities in a programmatic auction rests primarily with the platform (we will use Meta as the running example). This decision-making process is in fact a result of numerous intricately interwoven machine learning (ML) models designed to dynamically match content with precise targeting criteria and individualized user profiles on Meta. These models are instrumental in estimating the likelihood of a user engaging in specific actions within the Meta ecosystem.", + "bbox": [ + 511, + 590, + 913, + 743 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "As an illustration, consider a hypothetical Meta user named Simon who is anticipated to click on an ad (perform the \"Click\" action) with the slogan \"Up your game nights with an ultra-immersive setup\" displayed on a Meta Ad banner showcasing Singtel, a mobile operator company, and their home internet broadband product (Fig. 2). This prediction is done by Meta's internal ML models, and quite often contradicts Meta's widely publicized \"best practices\" blueprints [24]. Here, it is crucial to acknowledge the additional information that advertising engines such as Meta take into account. They are free to use factors such as Simon's past visits to telecom websites, pictures showing computer games in Simon's account", + "bbox": [ + 511, + 743, + 913, + 896 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada", + "bbox": [ + 84, + 75, + 372, + 85 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Qi Yang, Marlo Ongpin, Sergey Nikolenko, Alfred Huang, and Aleksandr Farseev", + "bbox": [ + 531, + 75, + 913, + 87 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/c4b3bb86fba7bd6e20ae39746b404a55331d25eab19f268ee82422c44c38dc6a.jpg", + "image_caption": [ + "Figure 2: A sample advertising banner on Meta." 
+ ], + "image_footnote": [], + "bbox": [ + 122, + 103, + 377, + 297 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "on Meta, and much more. Moreover, these factors include Meta's own revenue considerations, prediction of the ad's \"relevance\" by Meta itself, timing of displaying this ad during the day, recency of the ad account (to incentivize new advertisers with improved performance), and the internal \"ranking\" of advertisers based on their history of disapproved ads, a process overseen by Meta. Regrettably, these predictive estimations are further influenced by the accuracy of Meta's ML models that profile Simon's content. For instance, when Simon is observed putting a diaper on his child, Meta's object recognition system might mistakenly associate it with an \"Inflatable Boat / Fishing\" interest; this is a real-life incident on the Meta platform, and such mistakes compound into suboptimal ad-related predictions down the line.", + "bbox": [ + 81, + 325, + 482, + 505 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Confronted with numerous intricate technical hurdles, digital marketers, who frequently lack technical expertise, often resort to intuitive judgment or a trial-and-error methodology in formulating and examining their creative assets within digital advertising platforms. Thus, it becomes especially important to have comprehensive data-driven guidance, not only for optimizing outcomes but also for developing cost-effective practices.", + "bbox": [ + 81, + 506, + 482, + 603 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "One classical approach to providing this kind of guidance is to train an ML framework to predict the prospective performance of an advertising banner before allocating actual advertising budgets. 
In this section, we focus on the prediction of the click-through rate (CTR) metric, known to be closely associated with ad performance, particularly in the context of awareness and traffic advertising objectives. We used the recently presented SoWide model [20] as a sample state-of-the-art CTR prediction approach; its architecture is shown in Fig. 3. We updated the architecture slightly by replacing the ABN model for image processing with a Vision Transformer (ViT) [10], resulting in performance improvements, so we call it SoWide-v2.", + "bbox": [ + 81, + 604, + 482, + 768 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Unlike conventional supervised learning, where a data point $(\\mathbf{x},y)$ consists of both feature vector $\\mathbf{x}$ and target variable $y$ , the SoWide-v2 approach incorporates data from the campaign, ad set, and potentially multiple creatives to construct the features for each ad. Data points in the model leverage text and images from all creatives together with their respective estimated performances; in case of videos, we extract keyframes to obtain multiple distinct images included as additional training data. Furthermore, we extract low-level features from tabular, textual, and visual content, resulting", + "bbox": [ + 81, + 768, + 482, + 893 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/112cfbfecb1cc833ca3409d9807c516bac7c5d357b277dc9d792da4090557401.jpg", + "image_caption": [ + "Figure 3: SoWide-v2 architecture" + ], + "image_footnote": [], + "bbox": [ + 450, + 103, + 890, + 297 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "in a comprehensive dataset that can be used to train a model capable of predicting content performance based on information from multiple modalities. After preprocessing, extracted features serve as inputs for the click-through rate (CTR) prediction model. 
SoWidev2 makes the assumption that the performance of advertisements converges to an underlying global distribution [8, 13, 30], so we normalize CTR values into categorical representations. Predicted scores indicate whether the content can be classified as \"below average\", \"average\", or \"above average\" in terms of quality.", + "bbox": [ + 511, + 325, + 913, + 450 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In essence, SoWide-v2 is a neural network based on the \"wide and deep models\" approach well known in recommender systems [7]. To facilitate representation learning for multimodal content, SoWide-v2 employs separate embedding layers and fully connected layers for each set of features. This process allows it to project sparse, high-dimensional, and low-level features into higher-level representations. To handle each modality appropriately, SoWide-v2 employs distinct deep models for feature processing. Specifically, it uses the TabTransformer [21] for tabular features and multilingual BERT [9] for textual content; the original SoWide used the attention branch network [16] for images but for SoWide-v2 we replaced it with a Vision Transformer [10]. Additionally, a fully connected layer is utilized to project the sparse high-dimensional features into a denser low-dimensional representation. These representations are subsequently concatenated and fed into another fully connected layer, followed by a softmax function for CTR classification, facilitating end-to-end joint learning. 
The model is trained using stochastic gradient descent (SGD) for 100 epochs, and hyper-parameter optimization is performed with the tree-structured Parzen estimator [4].", + "bbox": [ + 511, + 450, + 913, + 714 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For evaluation results, we use the same datasets and baselines as the original SoWide paper [20], comparing the performance of SoWide-v2 against the original SoWide and several conventional machine learning baselines (there appears to be no previous work on CTR prediction before [20] that could be used for a direct comparison) using the F1-score, a widely used classification metric. Evaluation is done in two different settings: for general ad campaigns and also specifically for campaigns targeting the \"Conversion\" objective, which represents the two most prevalent and significant ad campaign objectives. The results shown in Table 1 demonstrate that the SoWide-v2 model presents an improvement over the original SoWide, and both models significantly outperform all classical ML baselines. Notably, the F1-score for the general ad campaigns reaches 0.78,", + "bbox": [ + 511, + 714, + 915, + 893 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Against Opacity: Explainable AI and Large Language Models for Effective Digital Advertising", + "bbox": [ + 83, + 75, + 522, + 87 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada", + "bbox": [ + 624, + 75, + 911, + 85 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/d81a1b7e2888f389fc9dce32e0582f52aaa9238833b2c0fb8e377e88c7f887e1.jpg", + "table_caption": [ + "Table 1: F1-score evaluation for CTR prediction models." + ], + "table_footnote": [], + "table_body": "
AllCon- vers- sion
k-nearest neighbors0.3380.254
Random forest0.3020.293
Gradient boosting0.3490.262
AdaBoost0.2890.277
Multilayer perceptron0.6540.642
SoWide0.7020.660
SoWide-v20.7800.671
", + "bbox": [ + 94, + 157, + 253, + 388 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Content Analysis", + "text_level": 1, + "bbox": [ + 285, + 107, + 406, + 119 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Average Predicted Score Low", + "bbox": [ + 285, + 125, + 416, + 133 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$\\mathbb{N}^2$ of Ads: 16", + "bbox": [ + 285, + 138, + 346, + 147 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$\\mathsf{N}^{\\mathsf{g}}$ of Creatives: 25", + "bbox": [ + 285, + 152, + 367, + 162 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/acf1d5dc66621283575e40bafc9c60cf3bb0955f77888301abc3921b2143422b.jpg", + "image_caption": [ + "Figure 4: Sample predicted low-CTR and high-CTR advertising banners and heatmap visualizations of the attention layers involved in the prediction." + ], + "image_footnote": [], + "bbox": [ + 271, + 171, + 424, + 349 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/4ffae6c66addebde4791fb51721097dd80f7180f5efcc93a3f6e017e285a8ce0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 433, + 172, + 586, + 349 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/4bd9708227b626fdfcf010ad44a602d119731127edb8f5742a99ca7b77df091b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 589, + 172, + 748, + 349 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/1eaf6b5d2722b766c5e99af3c555f3538205c698b313ac8e7426daef3cae5cd9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 751, + 172, + 913, + 349 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "which confirms that the SoWide-v2 approach effectively accommodates the hierarchical structure inherent in advertising data, enabling effective multimodal learning for the prediction of ad performance. 
The results validate that SoWide-v2 is a state-of-the-art CTR prediction model.", + "bbox": [ + 81, + 401, + 482, + 469 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Thus far, we have introduced a framework that enables advertisers to assess the potential performance of their own content, and potentially that of their competitors, prior to its launch. This represents a valuable tactical capability that had been unavailable to the community for a long time. However, once a creative marketer gains access to the initial prediction results for a specific content piece, another significant challenge lies in comprehending the underlying factors that contribute to its success or failure. What went wrong, what was done right, and how do we amplify the right parts while suppressing the wrong parts?", + "bbox": [ + 81, + 470, + 482, + 609 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "One approach to address this question would be to utilize various visualization techniques, specifically those that illustrate the decision-making process of the neural network while making a specific prediction. If the prediction is accurate, such visualizations are believed to provide insights into the underlying reasons behind the performance of a creative asset. Consequently, these visualizations can serve as a valuable resource for marketers in making informed decisions regarding the inclusion of specific components in future creative assets, enabling them to effectively communicate their requirements to the creative team.", + "bbox": [ + 81, + 609, + 482, + 747 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Figure 4 shows an illustrative example of such visualizations. The attention layers of the neural network used for CTR prediction are visualized as interactive heatmaps, revealing the specific regions of the banner that significantly influence the model's predictions. 
The figure shows that such attention visualization highlights the key elements within a Singtel banner (on the left) that contribute to its high predicted performance, namely gaming-related objects such as the monitor and the game controller. These elements effectively convey the message that a superior internet connection is essential for enhancing the gaming experience. Similarly, for the Circles.Life", + "bbox": [ + 81, + 747, + 482, + 886 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "banner (on the right), the areas featuring the lady in the background were found to negatively impact its performance. This suggests that the composition and balance of the banner's visual elements, particularly in relation to the overall content creation practices, may have influenced its predicted low CTR values.", + "bbox": [ + 511, + 401, + 913, + 470 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 EXPLAINING HUMANS FOR HUMANS: SODA, A LLM-BASED ADVERTISING ANALYSIS FRAMEWORK", + "text_level": 1, + "bbox": [ + 513, + 484, + 870, + 532 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In the last section, we presented a system capable of effectively capturing and visualizing the factors influencing the predicted performance of ads in terms of potential CTR. However, in domains such as performance marketing decisions for choosing specific creatives for campaigns often need to be made under tight deadlines, sometimes literally in a few hours or even minutes. Moreover, these industries are characterized by large volumes of creative assets and a multitude of promotions simultaneously conducted by competitors in an \"always-on\" manner. 
Therefore, one cannot run detailed analysis for every ad, and there is a dire need for further automated analytical tools that would enable human marketers to rapidly comprehend available data and information.", + "bbox": [ + 511, + 536, + 913, + 700 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In order to address this challenge, we present an extension to our framework with a novel approach that leverages large language models (LLMs) to provide additional insights into the data and CTR predictions, called SODA. We outline an analytical pipeline that incorporates LLM-based explanations and generations and demonstrate its practical applications through a real-world scenario involving four Singapore telecommunication companies. This part of our framework aims to enhance the interpretability and comprehension of the data, facilitating better-informed decision-making in these fast-paced and competitive industries.", + "bbox": [ + 511, + 702, + 913, + 840 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The general pipeline of our analysis is shown in Figure 5. 
First, we use an LLM to extract specific well-defined insights from input ads, such as the needs served by this ad, products being advertised, and more (see below); the insights can be stored as features in", + "bbox": [ + 511, + 840, + 913, + 896 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada", + "bbox": [ + 84, + 75, + 374, + 85 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Qi Yang, Marlo Ongpin, Sergey Nikolenko, Alfred Huang, and Aleksandr Farseev", + "bbox": [ + 531, + 75, + 913, + 87 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/40419723bf25fd8c75bdcd9f0b89e0e34c20da748523b3877ae518fe1cc38457.jpg", + "image_caption": [ + "Figure 5: General pipeline of our LLM-based analysis" + ], + "image_footnote": [], + "bbox": [ + 98, + 103, + 330, + 542 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/f8147ac1545644272b718d70da3125c80dd591c88d270d8ca53e061febc8d9b0.jpg", + "image_caption": [ + "Figure 6: Sample ad analysis" + ], + "image_footnote": [], + "bbox": [ + 352, + 103, + 921, + 242 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/78bab7b34c60a5323f9d775cdbd3ac401be8ac157a094161dd56c211689b0508.jpg", + "image_caption": [ + "Figure 7: Sample brand persona analysis results" + ], + "image_footnote": [], + "bbox": [ + 357, + 268, + 916, + 431 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/c3822a688e66daeb84d3b44532b2a359cfc5d46323c77f761cbffddcf11b66aa.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 357, + 453, + 916, + 565 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/b3e7aeef26d19972d49e9be7539552e1ba549f10a543d21e3e3e05c7533dc5e6.jpg", + "image_caption": [ + "Figure 8: Sample brand comparative analysis results", + "Figure 9: Sample user persona generation results." 
+ ], + "image_footnote": [], + "bbox": [ + 84, + 585, + 913, + 756 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "tabular form. Then, we use these features together with further engineered prompts to perform generalizing analysis of a brand's target audiences, personas, needs, and insights expressed by the ads, tone, and topical categories of the current campaign and others. The resulting coverage of the campaign closely reflects campaign analysis commonly performed by marketing professionals and can be further used to tune the brand's message, tone, target audiences, personas, and more. The pipeline is also able to present", + "bbox": [ + 81, + 784, + 483, + 896 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "specific examples helpful for marketing professionals, such as sample (imagined) user profiles or user personas, which are also one of the common marketing tools. Let us dive into some details.", + "bbox": [ + 511, + 784, + 915, + 825 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Figure 6 shows sample results of our initial experiments on ad analysis. We selected batches of ads from the Facebook Ad Library for the same brand and processed them with an LLM, customized only with natural language prompt engineering. As a result, the LLM has been able to successfully identify key features of each", + "bbox": [ + 511, + 825, + 913, + 896 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Against Opacity: Explainable AI and Large Language Models for Effective Digital Advertising", + "bbox": [ + 83, + 75, + 524, + 87 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada", + "bbox": [ + 625, + 75, + 911, + 85 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "advertisement, including excellent responses to such seemingly \"human\" questions as identifying the human need, human insight, and the main archetypes used in an ad. 
Moreover, answers to most questions are standardized (as the LLM was instructed) and can be subject to automated processing. This kind of analysis has always been a key part of online marketing, and to the best of our knowledge, it has never been successfully automated and scaled up before. Such tasks had always required human labeling and thus had been restricted to a few sample ads rather than the entire dataset.", + "bbox": [ + 81, + 106, + 480, + 231 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As the next step, we use the ads and extracted features as inputs for a number of prompts asking to summarize information in a variety of formats commonly used in content marketing. We have seen successful summarization across the board, with important insights identified by the LLM and presented in an accessible and actionable format. Fig. 7 shows a sample result of our brand persona analysis, complete with main brand values used in the ad campaigns, the goals of using them, and detailed analysis of the primary \"caregiver\" persona, including supporting examples from the data.", + "bbox": [ + 81, + 231, + 480, + 354 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Figure 8 shows the results of a comparative analysis of four advertising campaigns run over the same time period by different brands. Again, the LLM has correctly identified its key distinguishing factors, and the list of differences is very similar to one that could be produced by a human marketing professional.", + "bbox": [ + 81, + 356, + 480, + 425 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Another avenue for using state-of-the-art generative AI capabilities that we have explored is user persona generation, an important tool in content marketing that has long proven to be useful for creative work[12, 14, 40]. To produce user personas, we begin with a list of interests (either extracted as shown in Fig. 
9 or obtained from the client and/or social media platform) and prompt the LLM to give examples of user descriptions that could fit such interests. Fig. 7 shows a sample resulting user persona, which is fully believable to the professionals. To make the result even more tangible, we supplement such user personas with images generated by a state-of-the-art text-image model, in this case, Stable Diffusion [31]. To make the entire pipeline self-contained we ask the original LLM to also generate the prompt for the text-image model from the user persona description and a few examples of good prompts. The results also illustrated in Fig. 7, are very promising.", + "bbox": [ + 81, + 425, + 482, + 632 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The LLM used in all experiments was ChatGPT based on GPT-3.5 [27], and we believe that simply switching to more powerful LLMs such as GPT-4 [26] may lead to further increased performance across all applications. Note also that while GPT-3.5 can only process text ads, GPT-4 is already able to analyze images jointly with text (this ability has not yet been made public at the time of writing), which is arguably even more important for content marketing.", + "bbox": [ + 81, + 632, + 482, + 729 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 CASE STUDY", + "text_level": 1, + "bbox": [ + 83, + 752, + 223, + 766 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To evaluate the practical value and viability of the proposed framework expansion using large language models (LLMs) for generating rapid insights and enabling prompt marketing-related decision-making, we have engaged 12 marketing professionals currently employed at marketing departments of Business-to-Consumer (B2C) brands or advertising and marketing agencies across Singapore, China, and the UK. 
These professionals were selected based on their extensive experience, averaging 9 years, in managing digital marketing campaigns across various industries.", + "bbox": [ + 81, + 770, + 482, + 896 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The professionals were presented with the details and results of our preliminary experiments analyzing and comparing the marketing campaigns of four major telecommunication companies in Singapore, as described in Section 3. They were then asked to provide their perspectives on the usefulness, quality, and potential impact of the insights and outputs generated by our framework.", + "bbox": [ + 511, + 106, + 913, + 189 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "All 12 professionals responded very positively about the value of our approach. They found high-level overviews of brand positioning and audience targeting strategies, enriched with specific examples, to be highly useful to gain quick familiarity with brand messaging and inspire new creative directions. The generated user personas and accompanying AI-generated images were praised for bringing additional richness and tangibility to the insights.", + "bbox": [ + 511, + 189, + 913, + 286 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Several professionals commented that the coherent, standardized format of the outputs would allow for efficient processing and decision-making, especially given the tight timeframes frequently faced in the industry. 
More senior professionals have expressed that they foresee solutions like ours significantly augmenting and accelerating essential marketing functions through the automation of repetitive, labor-intensive tasks.", + "bbox": [ + 511, + 286, + 911, + 382 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "This highly encouraging feedback from advertising and marketing professionals suggests strong potential business value in developing and applying AI-powered solutions, such as the proposed extension of our framework, for the automation of marketing campaign analysis and strategic planning. While adoption may face initial resistance, especially from very senior professionals, many in the industry seem poised to welcome AI augmenting and enhancing their work. Our approach, focused on mimicking established human processes and outputs, appears well-suited to addressing common pain points and unlocking new efficiencies, especially in such fast-paced domains as performance marketing.", + "bbox": [ + 511, + 383, + 913, + 536 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5 CONCLUSION", + "text_level": 1, + "bbox": [ + 514, + 566, + 663, + 580 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this work, we have presented a novel advertising analysis framework, called SODA, which amalgamates large language models, explainable artificial intelligence, and attention map visualization techniques, heralding a potential future of human-AI collaboration within the realm of digital advertising. Through the integration of LLMs and the incorporation of explainability aspects, our novel approach envisions enhanced efficiency and synergy between marketers and AI systems, hopefully leading to a new era of intelligent decision-making. 
We believe that our approach holds the promise of empowering a new generation of marketers to leverage advanced AI technologies effectively, fostering a deeper understanding of the underlying mechanisms driving ad performance and facilitating informed decision-making processes. Note that while we already show promising results, these are mostly preliminary experiments, and we strongly believe that this direction of research will bring many new advances in the nearest future.", + "bbox": [ + 511, + 585, + 913, + 806 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "6 ACKNOWLEDGEMENT", + "text_level": 1, + "bbox": [ + 513, + 849, + 741, + 862 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "This work was funded by the Russian Science Foundation grant No 22-11-00135 https://rscf.ru/en/project/22-11-00135/", + "bbox": [ + 511, + 867, + 913, + 896 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada", + "bbox": [ + 84, + 75, + 372, + 85 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Qi Yang, Marlo Ongpin, Sergey Nikolenko, Alfred Huang, and Aleksandr Farseev", + "bbox": [ + 531, + 75, + 911, + 87 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 84, + 104, + 202, + 119 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] [n.d.]. Best practices to potentially reduce cost per result for Meta ads. https://www.facebook.com/business/help/321695409726523.", + "[2] [n.d.]. Netflix Prize data. https://www.kaggle.com/datasets/netflix-inc/netflix-prize-data.", + "[3] Anton Alekseev, Elena Tutubalina, Sejeong Kwon, and Sergey Nikolenko. 2022. Near-Zero-Shot Suggestion Mining with a Little Help from WordNet. In Analysis of Images, Social Networks and Texts. Springer International Publishing, Cham, 23-36.", + "[4] J. Bergstra, R. Bardenet, Y. Bengio, and B. Kegl. 2011. 
Algorithms for HyperParameter Optimization. In Advances in Neural Information Processing Systems, Vol. 24. Curran Associates, Inc.", + "[5] K Buraya, A Farseev, and A Filchenkov. 2018. Multi-view personality profiling based on longitudinal data. Lecture Notes in Computer Science 11018 (2018), 15-27.", + "[6] Jiawei Chen, Hande Dong, Xiang Wang, Fuli Feng, Meng Wang, and Xiangnan He. 2023. Bias and Debias in Recommender System: A Survey and Future Directions. ACM Trans. Inf. Syst. 41, 3, Article 67 (feb 2023), 39 pages.", + "[7] H.-T. Cheng, L. Koc, J. Harmsen, T. Shaked, T. Chandra, H. Aradhye, G. Anderson, G. Corrado, W. Chai, M. Ispir, R. Anil, Z. Haque, L. Hong, V. Jain, X. Liu, and H. Shah. 2016. Wide & Deep Learning for Recommender Systems. In Proc. 1st Workshop on Deep Learning for Recommender Systems (Boston, MA, USA) (DLRS 2016). ACM, New York, NY, USA, 7-10.", + "[8] Alok Kumar Chowdhury, Aleksandr Farseev, Prithwi Raj Chakraborty, Dian Tjondronegoro, and Vinod Chandran. 2017. Automatic classification of physical exercises from wearable sensors using small dataset from non-laboratory settings. In 2017 IEEE Life Sciences Conference (LSC). 111-114. https://doi.org/10.1109/LSC.2017.8268156", + "[9] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proc. 2019 NAACL. ACL, 4171-4186.", + "[10] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. 2020. An image is worth $16 \\times 16$ words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020).", + "[11] Yali Du, Yinwei Wei, Wei Ji, Fan Liu, Xin Luo, and Liqiang Nie. 2023. Multi-Queue Momentum Contrast for Microvideo-Product Retrieval. 
In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining (Singapore, Singapore) (WSDM '23). ACM, New York, NY, USA, 1003–1011. https://doi.org/10.1145/3539597.3570405", + "[12] Aleksandr Farseev. 2023. Under the Hood of Social Media Advertising: How Do We Use AI Responsibly for Advertising Targeting and Creative Evaluation. In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining (Singapore, Singapore) (WSDM '23). ACM, New York, NY, USA, 1281-1282. https://doi.org/10.1145/3539597.3575791", + "[13] A Farseev, N Gukov, I Gossoudarev, and U Zarichnyak. 2014. Cross-platform online venue and user community recommendation based upon social networks data mining. Computer Instruments in Education 6 (2014), 28-38.", + "[14] Aleksandr Farseev, Kirill Lepikhin, Hendrik Schwartz, Eu Khoon Ang, and Kenny Powar. 2018. SoMin.AI: Social Multimedia Influencer Discovery Marketplace. In Proceedings of the 26th ACM International Conference on Multimedia (Seoul, Republic of Korea) (MM '18). ACM, New York, NY, USA, 1234-1236. https://doi.org/10.1145/3240508.3241387", + "[15] Aleksandr Farseev, Qi Yang, Andrey Filchenkov, Kirill Lepikhin, Yu-Yi Chu-Farseeva, and Daron-Benjamin Loo. 2021. SoMin.Ai: Personality-Driven Content Generation Platform. In Proceedings of the 14th ACM International Conference on Web Search and Data Mining (WSDM '21). ACM, New York, NY, USA, 890-893. https://doi.org/10.1145/3437963.3441714", + "[16] H. Fukui, T. Hirakawa, T. Yamashita, and H. Fujiyoshi. 2019. Attention Branch Network: Learning of Attention Mechanism for Visual Explanation. Computer Vision and Pattern Recognition (2019), 10705-10714.", + "[17] T. Ge, H. Liu, P. Yi, S. Huang, Z. Zhang, X. Zhu, Y. Zhang, K. Gai, L. Zhao, G. Zhou, K. Chen, S. Liu, H. Yi, Z. Hu, B. Liu, and P. Sun. 2018. Image Matters: Visually Modeling User Behaviors Using Advanced Model Server. 
2087-2095.", + "[18] Xiangnan He, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu, and Tat-Seng Chua. 2017. Neural Collaborative Filtering. In Proceedings of the 26th International Conference on World Wide Web (Perth, Australia) (WWW'17). International World Wide Web Conferences Steering Committee, Republic and Canton of Geneva, CHE, 173-182. https://doi.org/10.1145/3038912.3052569", + "[19] X. He, J. Pan, O. Jin, T. Xu, B. Liu, T. Xu, Y. Shi, A. Atallah, R. Herbrich, S. Bowers, and J. Q. Candela. 2014. Practical Lessons from Predicting Clicks on Ads at Facebook. In Proc. 8th International Workshop on Data Mining for Online Advertising (ADKDD'14). ACM, 1-9.", + "[20] Alfred Huang, Qi Yang, Sergey Nikolenko, Marlo Ongpin, Ilia Gossoudarev, Ngoc Yen Duong, Kirill Lepikhin, Sergey Vishnyakov, Yuyi Chu-Farseeva, and Aleksandr Farseev. 2023. SoCraft: Advertiser-Level Predictive Scoring for Creative Performance on Meta. In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining (Singapore, Singapore) (WSDM '23). ACM," + ], + "bbox": [ + 86, + 123, + 482, + 888 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "New York, NY, USA, 1132-1135. https://doi.org/10.1145/3539597.3573032", + "[21] X. Huang, A. Khetan, M. Cvitkovic, and Z. Karnin. 2020. TabTransformer: Tabular Data Modeling Using Contextual Embeddings. (2020). arXiv:2012.06678 [cs.LG]", + "[22] Sergei Koltcov, Olessia Koltsova, and Sergey Nikolenko. 2014. Latent Dirichlet Allocation: Stability and Applications to Studies of User-Generated Content. In Proceedings of the 2014 ACM Conference on Web Science (Bloomington, Indiana, USA) (WebSci '14). ACM, New York, NY, USA, 161–165. https://doi.org/10.1145/2615569.2615680", + "[23] H. B. McMahan, G. Holt, D. Sculley, M. Young, D. Ebner, J. Grady, L. Nie, T. Phillips, E. Davydov, D. Golovin, S. Chikkerur, D. Liu, M. Wattenberg, A. M. Hrafinkelsson, T. Boulos, and J. Kubica. 2013. 
Ad Click Prediction: A View from the Trenches. In Proc. 19th ACM SIGKDD (KDD '13). ACM, 1222-1230.", + "[24] Meta. 2023. Meta Blueprint. https://www.facebookblueprint.com/student/catalog Accessed on June 06, 2023.", + "[25] Sergey Nikolenko. 2015. SVD-LDA: Topic Modeling for Full-Text Recommender Systems. In Advances in Artificial Intelligence and Its Applications, Odbulia Pichardo Lagunas, Oscar Herrera Alcantara, and Gustavo Arroyo Figueroa (Eds.). Springer International Publishing, Cham, 67-79.", + "[26] OpenAI. 2023. GPT-4 Technical Report. arXiv:2303.08774 [cs.CL]", + "[27] Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob Hilton, Fraser Kelton, Luke Miller, Maddie Simens, Amanda Askell, Peter Welinder, Paul Christiano, Jan Leike, and Ryan Lowe. 2022. Training language models to follow instructions with human feedback. arXiv:2203.02155 [cs.CL]", + "[28] Wentao Ouyang, Xiwu Zhang, Shukui Ren, Chao Qi, Zhaojie Liu, and Yanlong Du. 2019. Representation Learning-Assisted Click-Through Rate Prediction. In Proc. 28th IJCAI, 4561-4567. https://doi.org/10.24963/ijcai.2019/634", + "[29] Francesco Ricci, Lior Rokach, Bracha Shapira, and Paul B. Kantor. 2010. Recommender Systems Handbook (1st ed.). Springer-Verlag, Berlin, Heidelberg.", + "[30] Matthew Richardson, Ewa Dominowska, and Robert Ragno. 2007. Predicting Clicks: Estimating the Click-through Rate for New Ads. In Proc. 16th WWWW (WWW '07), ACM, 521-530.", + "[31] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. 2022. High-Resolution Image Synthesis With Latent Diffusion Models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). 10684-10695.", + "[32] Andrey Savchenko, Anton Alekseev, Sejeong Kwon, Elena Tutubalina, Evgeny Myasnikov, and Sergey Nikolenko. 2020. 
Ad Lingua: Text Classification Improves Symbolism Prediction in Image Advertisements. In Proceedings of the 28th International Conference on Computational Linguistics. International Committee on Computational Linguistics, Barcelona, Spain (Online), 1886-1892. https://doi.org/10.18653/v1/2020.coling-main.171", + "[33] Ilya Shenbin, Anton Alekseev, Elena Tutubalina, Valentin Malykh, and Sergey I. Nikolenko. 2020. RecVAE: A New Variational Autoencoder for Top-N Recommendations with Implicit Feedback. In Proceedings of the 13th International Conference on Web Search and Data Mining (Houston, TX, USA) (WSDM '20). ACM, New York, NY, USA, 528-536. https://doi.org/10.1145/3336191.3371831", + "[34] Elena Tutubalina and Sergey I. Nikolenko. 2017. Demographic Prediction based on User Reviews about Medications. Computación y sistemas 21, 2 (2017), 227-241.", + "[35] Elena Tutubalina and Sergey I. Nikolenko. 2018. Exploring convolutional neural networks and topic models for user profiling from drug reviews. Multimedia Tools and Applications 77, 4 (2018), 4791-4809.", + "[36] Wenjie Wang, Fuli Feng, Liqiang Nie, and Tat-Seng Chua. 2022. User-Controllable Recommendation Against Filter Bubbles. In Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval (Madrid, Spain) (SIGIR '22). ACM, New York, NY, USA, 1251-1261. https://doi.org/10.1145/3477495.3532075", + "[37] Qi Yang, Aleksandr Farseev, and Andrey Filchenkov. 2021. Two-Faced Humans on Twitter and Facebook: Harvesting Social Multimedia for Human Personality Profiling. In Proceedings of the 2021 Workshop on Intelligent Cross-Data Analysis and Retrieval (Taipei, Taiwan) (ICDAR '21). ACM, New York, NY, USA, 39-47. https://doi.org/10.1145/3463944.3469270", + "[38] Qi Yang, Aleksandr Farseev, Sergey Nikolenko, and Andrey Filchenkov. 2022. Do we behave differently on Twitter and Facebook: Multi-view social network user personality profiling for content recommendation. 
Frontiers in Big Data 5 (2022). https://doi.org/10.3389/fdata.2022.931206", + "[39] Qi Yang, Sergey Nikolenko, Alfred Huang, and Aleksandr Farseev. 2022. Personality-Driven Social Multimedia Content Recommendation. In Proceedings of the 30th ACM International Conference on Multimedia (Lisboa, Portugal) (MM '22). ACM, New York, NY, USA, 7290-7299. https://doi.org/10.1145/3503161.3548769", + "[40] Qi Yang, Christos Tzelepis, Sergey Nikolenko, Ioannis Patras, and Aleksandr Farseev. 2023. \"Just To See You Smile\": SMILEY, a Voice-Guided GUY GAN. In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining (Singapore, Singapore) (WSDM '23). ACM, New York, NY, USA, 1196-1199. https://doi.org/10.1145/3539597.3573031" + ], + "bbox": [ + 517, + 109, + 911, + 883 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Against Opacity: Explainable AI and Large Language Models for Effective Digital Advertising", + "bbox": [ + 83, + 75, + 522, + 87 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada", + "bbox": [ + 625, + 75, + 911, + 85 + ], + "page_idx": 6 + } +] \ No newline at end of file diff --git a/data/2025/2504_20xxx/2504.20064/3484c7dd-5472-4f2c-ab8a-d4410cc59cb3_model.json b/data/2025/2504_20xxx/2504.20064/3484c7dd-5472-4f2c-ab8a-d4410cc59cb3_model.json new file mode 100644 index 0000000000000000000000000000000000000000..4b2529b3c0a84510421c41a2f36e931f22539e83 --- /dev/null +++ b/data/2025/2504_20xxx/2504.20064/3484c7dd-5472-4f2c-ab8a-d4410cc59cb3_model.json @@ -0,0 +1,1842 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.273, + 0.061, + 0.709 + ], + "angle": 270, + "content": "arXiv:2504.20064v1 [cs.IR] 22 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.088, + 0.101, + 0.911, + 0.152 + ], + "angle": 0, + "content": "Against Opacity: Explainable AI and Large Language Models for Effective Digital Advertising" + }, + { + "type": "text", + "bbox": [ 
+ 0.198, + 0.163, + 0.266, + 0.18 + ], + "angle": 0, + "content": "Qi Yang" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.181, + 0.287, + 0.195 + ], + "angle": 0, + "content": "yangqi@itmo.ru" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.196, + 0.29, + 0.209 + ], + "angle": 0, + "content": "ITMO University" + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.211, + 0.313, + 0.225 + ], + "angle": 0, + "content": "Saint Petersburg, Russia" + }, + { + "type": "text", + "bbox": [ + 0.442, + 0.163, + 0.558, + 0.18 + ], + "angle": 0, + "content": "Marlo Ongpin" + }, + { + "type": "text", + "bbox": [ + 0.442, + 0.181, + 0.556, + 0.194 + ], + "angle": 0, + "content": "marlo@somin.ai" + }, + { + "type": "text", + "bbox": [ + 0.438, + 0.196, + 0.562, + 0.208 + ], + "angle": 0, + "content": "SoMin.ai Research" + }, + { + "type": "text", + "bbox": [ + 0.428, + 0.211, + 0.572, + 0.225 + ], + "angle": 0, + "content": "Singapore, Singapore" + }, + { + "type": "text", + "bbox": [ + 0.696, + 0.163, + 0.84, + 0.18 + ], + "angle": 0, + "content": "Sergey Nikolenko" + }, + { + "type": "text", + "bbox": [ + 0.683, + 0.181, + 0.854, + 0.195 + ], + "angle": 0, + "content": "sergey@logic.pdmi.ras.ru" + }, + { + "type": "text", + "bbox": [ + 0.71, + 0.196, + 0.827, + 0.21 + ], + "angle": 0, + "content": "ITMO University" + }, + { + "type": "text", + "bbox": [ + 0.659, + 0.211, + 0.877, + 0.224 + ], + "angle": 0, + "content": "Steklov Institute of Mathematics" + }, + { + "type": "text", + "bbox": [ + 0.687, + 0.226, + 0.849, + 0.24 + ], + "angle": 0, + "content": "Saint Petersburg, Russia" + }, + { + "type": "text", + "bbox": [ + 0.308, + 0.252, + 0.422, + 0.269 + ], + "angle": 0, + "content": "Alfred Huang" + }, + { + "type": "text", + "bbox": [ + 0.308, + 0.27, + 0.422, + 0.283 + ], + "angle": 0, + "content": "alfred@somin.ai" + }, + { + "type": "text", + "bbox": [ + 0.303, + 0.285, + 0.428, + 0.297 + ], + "angle": 0, + "content": "SoMin.ai Research" + }, + { + "type": "text", 
+ "bbox": [ + 0.294, + 0.3, + 0.437, + 0.314 + ], + "angle": 0, + "content": "Singapore, Singapore" + }, + { + "type": "text", + "bbox": [ + 0.558, + 0.252, + 0.708, + 0.267 + ], + "angle": 0, + "content": "Aleksandr Farseev" + }, + { + "type": "text", + "bbox": [ + 0.578, + 0.27, + 0.688, + 0.282 + ], + "angle": 0, + "content": "sasha@somin.ai" + }, + { + "type": "text", + "bbox": [ + 0.57, + 0.285, + 0.696, + 0.297 + ], + "angle": 0, + "content": "SoMin.ai Research" + }, + { + "type": "text", + "bbox": [ + 0.562, + 0.3, + 0.705, + 0.314 + ], + "angle": 0, + "content": "Singapore, Singapore" + }, + { + "type": "image", + "bbox": [ + 0.125, + 0.33, + 0.871, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.207, + 0.497, + 0.788, + 0.512 + ], + "angle": 0, + "content": "Figure 1: Overview of the LLM-based advertising analysis framework SODA (Section 3)." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.518, + 0.184, + 0.531 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.536, + 0.484, + 0.772 + ], + "angle": 0, + "content": "The opaqueness of modern digital advertising, exemplified by platforms such as Meta Ads, raises concerns regarding their autonomous control over audience targeting, pricing structures, and ad relevancy assessments. Locked in their leading positions by network effects, \"Metas and Googles of the world\" attract countless advertisers who rely on intuition, with billions of dollars lost on ineffective social media ads. The platforms' algorithms use huge amounts of data unavailable to advertisers, and the algorithms themselves are opaque as well. This lack of transparency hinders the advertisers' ability to make informed decisions and necessitates efforts to promote transparency, standardize industry metrics, and strengthen regulatory frameworks. 
In this work, we propose novel ways to assist marketers in optimizing their advertising strategies via machine learning techniques designed to analyze and evaluate content, in particular, predict the click-through rates (CTR) of novel advertising content. Another important problem is that large volumes of data available in the competitive landscape, e.g., competitors' ads," + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.781, + 0.483, + 0.853 + ], + "angle": 0, + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.854, + 0.359, + 0.864 + ], + "angle": 0, + "content": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.865, + 0.473, + 0.875 + ], + "angle": 0, + "content": "© 2023 Copyright held by the owner/author(s). Publication rights licensed to ACM." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.876, + 0.295, + 0.884 + ], + "angle": 0, + "content": "ACM ISBN 979-8-4007-0108-5/23/10...$15.00" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.885, + 0.273, + 0.896 + ], + "angle": 0, + "content": "https://doi.org/10.1145/3581783.3612817" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.519, + 0.916, + 0.672 + ], + "angle": 0, + "content": "impede the ability of marketers to derive meaningful insights. 
This leads to a pressing need for a novel approach that would allow us to summarize and comprehend complex data. Inspired by the success of ChatGPT in bridging the gap between large language models (LLMs) and a broader non-technical audience, we propose a novel system that facilitates marketers in data interpretation, called SODA, that merges LLMs with explainable AI, enabling better human-AI collaboration with an emphasis on the domain of digital marketing and advertising. By combining LLMs and explainability features, in particular modern text-image models, we aim to improve the synergy between human marketers and AI systems." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.683, + 0.653, + 0.697 + ], + "angle": 0, + "content": "CCS CONCEPTS" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.702, + 0.916, + 0.744 + ], + "angle": 0, + "content": "- Information systems \\(\\rightarrow\\) Learning to rank; Multimedia and multimodal retrieval; Computational advertising; Multimedia and multimodal retrieval; Computational advertising." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.756, + 0.622, + 0.769 + ], + "angle": 0, + "content": "KEYWORDS" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.774, + 0.915, + 0.803 + ], + "angle": 0, + "content": "Digital Advertising, Ads Performance Prediction, Deep Learning, Large Language Model, Explainable AI" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.808, + 0.662, + 0.819 + ], + "angle": 0, + "content": "ACM Reference Format:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.82, + 0.915, + 0.895 + ], + "angle": 0, + "content": "Qi Yang, Marlo Ongpin, Sergey Nikolenko, Alfred Huang, and Aleksandr Farseev. 2023. Against Opacity: Explanable AI and Large Language Models for Effective Digital Advertising. In Proceedings of the 31st ACM International Conference on Multimedia (MM '23), October 29-November 3, 2023, Ottawa, ON, Canada. ACM, New York, NY, USA, 7 pages. 
https://doi.org/10.1145/3581783.3612817" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.374, + 0.087 + ], + "angle": 0, + "content": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada" + }, + { + "type": "header", + "bbox": [ + 0.532, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "Qi Yang, Marlo Ongpin, Sergey Nikolenko, Alfred Huang, and Aleksandr Farseev" + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.106, + 0.258, + 0.12 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.125, + 0.482, + 0.346 + ], + "angle": 0, + "content": "The online advertising industry is the poster child of data science. Google and Facebook became industry-dominating behemoths to a large extent because they excelled at crunching the numbers and showing the best online ads to their primary assets, user audiences, while Amazon did the same for item recommendations in its online store. In academia, the Netflix Prize Competition [2] devoted to movie recommendations was one of the first open competitions with serious prizes and organization, a pioneer that would eventually lead to Kaggle and innumerable open leaderboards that nowadays track the state of the art in virtually every measurable ML task. The Netflix Prize itself has led to significant breakthroughs in collaborative filtering, and its dataset is still used as one of the standard benchmarks [2]. One definitely cannot say that the field of recommender systems, in particular online advertising, lacks the attention of machine learning researchers, and many important advances keep being made every month [6, 11, 18, 29, 33, 36-38]." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.347, + 0.483, + 0.54 + ], + "angle": 0, + "content": "However, most advances are being made on the side of the platforms (ad marketplaces) such as Facebook (Meta) [19], Google [23], Alibaba [28], or Taobao [17], and therefore they are not accessible to the advertising platform users, i.e., digital marketers. Collaborative filtering datasets are understandably private, and marketing professionals that create advertising content do not have access to the data needed to predict their own future performance. Note that these predictions are often self-fulfilling: if, e.g., Meta models predict low click-through ratio (CTR) for your ad, Meta will charge you more for showing it, probably show it less, and the campaign will likely be a failure regardless of how accurate the CTR prediction has been [1]. Often, there is no practical way to control the cost of advertising; technically, if a platform decided to charge more money for an ad nothing could prevent them from doing so." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.54, + 0.483, + 0.678 + ], + "angle": 0, + "content": "Moreover, even if marketing professionals could run the corresponding models, that would only be of modest help with their job, which is content creation. Suppose that a model tells you that your new ad is a bad match for your audience, and the expected CTR is low. How do you fix that? It cannot be a pure collaborative filtering model since it has to predict CTR for a new ad that has not been shown to users yet, but it is still an opaque model that maps your ad content into a latent representation via \"giant inscrutable matrices\". So all you can do even if you have such a model is to try and make a different ad, get a new prediction, and work via trial and error." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.679, + 0.483, + 0.789 + ], + "angle": 0, + "content": "One potential way to address this issue involves visualizing the decision-making process of a neural network, providing marketers with insights into the rationale behind specific predictions made by AI models [3, 15, 20, 39]. Therefore, our first contribution in this work is a new variation of a state-of-the-art CTR prediction model coupled with a mechanism for analyzing the ad images (banners) via an image attention mechanism. The results provide human-understandable analysis that can be turned into actionable insights." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.789, + 0.483, + 0.886 + ], + "angle": 0, + "content": "However, this is only the beginning. Individual ad analysis via explainable ML models has proven beneficial in scenarios such as individual content evaluation prior to starting an advertising campaign, but it is much less practical when applied to large volumes of images and text ads in real-world settings. The time constraints faced by marketers impede their ability to effectively process and extract key content traits in their own advertising practices." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.108, + 0.915, + 0.328 + ], + "angle": 0, + "content": "In our opinion, the long-awaited revolution in digital advertising and content marketing will occur when both the ads themselves and the results of opaque models can be explained in ways that are both understandable for humans and actionable in terms of business results. We believe that the time for this revolution is now, and in this work we show that large language models such as GPT-3.5 [27] and GPT-4 [26] are already increasingly able to explain the \"reasoning\" behind recommender models and provide aggregate insights about advertising campaigns consisting of hundreds of individual ads. 
Prior to LLMs, approaches to aggregate text corpora in the context of recommender systems had been proposed via topic modeling [22, 25], sometimes coupled with deep learning [35] and user profiling [5, 34], but topic modeling is based on the bag-of-words assumption and cannot summarize text as an LLM does; visual understanding of ads had also been explored with convolutional networks [32]." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.329, + 0.915, + 0.453 + ], + "angle": 0, + "content": "Therefore, our main contribution is that we present preliminary results for a road-map that could achieve this holy grail of content marketing: provide explainable, actionable insights into advertising content along with possible strategies for improvement with models that could work on the side of a small advertising agency rather than a huge platform. We begin with direct CTR prediction and then proceed to provide explainable insights and content recommendations with large language models and even visual generative AI (see Fig. 1)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.454, + 0.914, + 0.537 + ], + "angle": 0, + "content": "The paper is organized as follows: in Section 2, we present an improved model for CTR prediction and visualization procedures for advertising banners, Section 3 introduces our approach to explainable ad analysis with large language models, Section 4 shows the results of a case study that confirms the effectiveness of our approach, and Section 5 concludes the paper." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.557, + 0.885, + 0.587 + ], + "angle": 0, + "content": "2 EXPLAINING OPAQUE AI WITH AI: CTR PREDICTION AND VISUALIZATIONS" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.592, + 0.915, + 0.744 + ], + "angle": 0, + "content": "The lack of transparency within the advertising sector has been widely acknowledged as a primary reason for the inefficient allocation of advertising budgets. 
Notably, the responsibility for determining the cost per 1,000 impressions (CPM) and selecting competing entities in a programmatic auction rests primarily with the platform (we will use Meta as the running example). This decision-making process is in fact a result of numerous intricately interwoven machine learning (ML) models designed to dynamically match content with precise targeting criteria and individualized user profiles on Meta. These models are instrumental in estimating the likelihood of a user engaging in specific actions within the Meta ecosystem." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.744, + 0.915, + 0.897 + ], + "angle": 0, + "content": "As an illustration, consider a hypothetical Meta user named Simon who is anticipated to click on an ad (perform the \"Click\" action) with the slogan \"Up your game nights with an ultra-immersive setup\" displayed on a Meta Ad banner showcasing Singtel, a mobile operator company, and their home internet broadband product (Fig. 2). This prediction is done by Meta's internal ML models, and quite often contradicts Meta's widely publicized \"best practices\" blueprints [24]. Here, it is crucial to acknowledge the additional information that advertising engines such as Meta take into account. 
They are free to use factors such as Simon's past visits to telecom websites, pictures showing computer games in Simon's account" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.524, + 0.088 + ], + "angle": 0, + "content": "Against Opacity: Explainable AI and Large Language Models for Effective Digital Advertising" + }, + { + "type": "header", + "bbox": [ + 0.625, + 0.076, + 0.913, + 0.087 + ], + "angle": 0, + "content": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada" + }, + { + "type": "image", + "bbox": [ + 0.124, + 0.104, + 0.378, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.093, + 0.302, + 0.41, + 0.317 + ], + "angle": 0, + "content": "Figure 2: A sample advertising banner on Meta." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.327, + 0.483, + 0.506 + ], + "angle": 0, + "content": "on Meta, and much more. Moreover, these factors include Meta's own revenue considerations, prediction of the ad's \"relevance\" by Meta itself, timing of displaying this ad during the day, recency of the ad account (to incentivize new advertisers with improved performance), and the internal \"ranking\" of advertisers based on their history of disapproved ads, a process overseen by Meta. Regrettably, these predictive estimations are further influenced by the accuracy of Meta's ML models that profile Simon's content. For instance, when Simon is observed putting a diaper on his child, Meta's object recognition system might mistakenly associate it with an \"Inflatable Boat / Fishing\" interest; this is a real-life incident on the Meta platform, and such mistakes compound into suboptimal ad-related predictions down the line." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.507, + 0.483, + 0.604 + ], + "angle": 0, + "content": "Confronted with numerous intricate technical hurdles, digital marketers, who frequently lack technical expertise, often resort to intuitive judgment or a trial-and-error methodology in formulating and examining their creative assets within digital advertising platforms. Thus, it becomes especially important to have comprehensive data-driven guidance, not only for optimizing outcomes but also for developing cost-effective practices." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.605, + 0.483, + 0.769 + ], + "angle": 0, + "content": "One classical approach to providing this kind of guidance is to train an ML framework to predict the prospective performance of an advertising banner before allocating actual advertising budgets. In this section, we focus on the prediction of the click-through rate (CTR) metric, known to be closely associated with ad performance, particularly in the context of awareness and traffic advertising objectives. We used the recently presented SoWide model [20] as a sample state-of-the-art CTR prediction approach; its architecture is shown in Fig. 3. We updated the architecture slightly by replacing the ABN model for image processing with a Vision Transformer (ViT) [10], resulting in performance improvements, so we call it SoWide-v2." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.77, + 0.483, + 0.895 + ], + "angle": 0, + "content": "Unlike conventional supervised learning, where a data point \\((\\mathbf{x},y)\\) consists of both feature vector \\(\\mathbf{x}\\) and target variable \\(y\\), the SoWide-v2 approach incorporates data from the campaign, ad set, and potentially multiple creatives to construct the features for each ad. 
Data points in the model leverage text and images from all creatives together with their respective estimated performances; in case of videos, we extract keyframes to obtain multiple distinct images included as additional training data. Furthermore, we extract low-level features from tabular, textual, and visual content, resulting" + }, + { + "type": "image", + "bbox": [ + 0.451, + 0.104, + 0.891, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.562, + 0.301, + 0.78, + 0.315 + ], + "angle": 0, + "content": "Figure 3: SoWide-v2 architecture" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.327, + 0.915, + 0.452 + ], + "angle": 0, + "content": "in a comprehensive dataset that can be used to train a model capable of predicting content performance based on information from multiple modalities. After preprocessing, extracted features serve as inputs for the click-through rate (CTR) prediction model. SoWidev2 makes the assumption that the performance of advertisements converges to an underlying global distribution [8, 13, 30], so we normalize CTR values into categorical representations. Predicted scores indicate whether the content can be classified as \"below average\", \"average\", or \"above average\" in terms of quality." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.452, + 0.915, + 0.715 + ], + "angle": 0, + "content": "In essence, SoWide-v2 is a neural network based on the \"wide and deep models\" approach well known in recommender systems [7]. To facilitate representation learning for multimodal content, SoWide-v2 employs separate embedding layers and fully connected layers for each set of features. This process allows it to project sparse, high-dimensional, and low-level features into higher-level representations. To handle each modality appropriately, SoWide-v2 employs distinct deep models for feature processing. 
Specifically, it uses the TabTransformer [21] for tabular features and multilingual BERT [9] for textual content; the original SoWide used the attention branch network [16] for images but for SoWide-v2 we replaced it with a Vision Transformer [10]. Additionally, a fully connected layer is utilized to project the sparse high-dimensional features into a denser low-dimensional representation. These representations are subsequently concatenated and fed into another fully connected layer, followed by a softmax function for CTR classification, facilitating end-to-end joint learning. The model is trained using stochastic gradient descent (SGD) for 100 epochs, and hyper-parameter optimization is performed with the tree-structured Parzen estimator [4]." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.715, + 0.916, + 0.895 + ], + "angle": 0, + "content": "For evaluation results, we use the same datasets and baselines as the original SoWide paper [20], comparing the performance of SoWide-v2 against the original SoWide and several conventional machine learning baselines (there appears to be no previous work on CTR prediction before [20] that could be used for a direct comparison) using the F1-score, a widely used classification metric. Evaluation is done in two different settings: for general ad campaigns and also specifically for campaigns targeting the \"Conversion\" objective, which represents the two most prevalent and significant ad campaign objectives. The results shown in Table 1 demonstrate that the SoWide-v2 model presents an improvement over the original SoWide, and both models significantly outperform all classical ML baselines. 
Notably, the F1-score for the general ad campaigns reaches 0.78," + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.375, + 0.087 + ], + "angle": 0, + "content": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada" + }, + { + "type": "header", + "bbox": [ + 0.532, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "Qi Yang, Marlo Ongpin, Sergey Nikolenko, Alfred Huang, and Aleksandr Farseev" + }, + { + "type": "table_caption", + "bbox": [ + 0.095, + 0.105, + 0.252, + 0.145 + ], + "angle": 0, + "content": "Table 1: F1-score evaluation for CTR prediction models." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.158, + 0.254, + 0.39 + ], + "angle": 0, + "content": "
AllCon- vers- sion
k-nearest neighbors0.3380.254
Random forest0.3020.293
Gradient boosting0.3490.262
AdaBoost0.2890.277
Multilayer perceptron0.6540.642
SoWide0.7020.660
SoWide-v20.7800.671
" + }, + { + "type": "title", + "bbox": [ + 0.286, + 0.108, + 0.408, + 0.12 + ], + "angle": 0, + "content": "Content Analysis" + }, + { + "type": "text", + "bbox": [ + 0.286, + 0.125, + 0.418, + 0.134 + ], + "angle": 0, + "content": "Average Predicted Score Low" + }, + { + "type": "text", + "bbox": [ + 0.287, + 0.139, + 0.347, + 0.148 + ], + "angle": 0, + "content": "\\(\\mathbb{N}^2\\) of Ads: 16" + }, + { + "type": "text", + "bbox": [ + 0.286, + 0.154, + 0.369, + 0.163 + ], + "angle": 0, + "content": "\\(\\mathsf{N}^{\\mathsf{g}}\\) of Creatives: 25" + }, + { + "type": "image", + "bbox": [ + 0.272, + 0.172, + 0.425, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.434, + 0.173, + 0.588, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.59, + 0.174, + 0.749, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.752, + 0.174, + 0.914, + 0.351 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.266, + 0.361, + 0.915, + 0.389 + ], + "angle": 0, + "content": "Figure 4: Sample predicted low-CTR and high-CTR advertising banners and heatmap visualizations of the attention layers involved in the prediction." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.402, + 0.483, + 0.47 + ], + "angle": 0, + "content": "which confirms that the SoWide-v2 approach effectively accommodates the hierarchical structure inherent in advertising data, enabling effective multimodal learning for the prediction of ad performance. The results validate that SoWide-v2 is a state-of-the-art CTR prediction model." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.471, + 0.483, + 0.61 + ], + "angle": 0, + "content": "Thus far, we have introduced a framework that enables advertisers to assess the potential performance of their own content, and potentially that of their competitors, prior to its launch. 
This represents a valuable tactical capability that had been unavailable to the community for a long time. However, once a creative marketer gains access to the initial prediction results for a specific content piece, another significant challenge lies in comprehending the underlying factors that contribute to its success or failure. What went wrong, what was done right, and how do we amplify the right parts while suppressing the wrong parts?" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.61, + 0.483, + 0.748 + ], + "angle": 0, + "content": "One approach to address this question would be to utilize various visualization techniques, specifically those that illustrate the decision-making process of the neural network while making a specific prediction. If the prediction is accurate, such visualizations are believed to provide insights into the underlying reasons behind the performance of a creative asset. Consequently, these visualizations can serve as a valuable resource for marketers in making informed decisions regarding the inclusion of specific components in future creative assets, enabling them to effectively communicate their requirements to the creative team." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.748, + 0.483, + 0.887 + ], + "angle": 0, + "content": "Figure 4 shows an illustrative example of such visualizations. The attention layers of the neural network used for CTR prediction are visualized as interactive heatmaps, revealing the specific regions of the banner that significantly influence the model's predictions. The figure shows that such attention visualization highlights the key elements within a Singtel banner (on the left) that contribute to its high predicted performance, namely gaming-related objects such as the monitor and the game controller. These elements effectively convey the message that a superior internet connection is essential for enhancing the gaming experience. 
Similarly, for the Circles.Life" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.402, + 0.915, + 0.471 + ], + "angle": 0, + "content": "banner (on the right), the areas featuring the lady in the background were found to negatively impact its performance. This suggests that the composition and balance of the banner's visual elements, particularly in relation to the overall content creation practices, may have influenced its predicted low CTR values." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.485, + 0.871, + 0.533 + ], + "angle": 0, + "content": "3 EXPLAINING HUMANS FOR HUMANS: SODA, A LLM-BASED ADVERTISING ANALYSIS FRAMEWORK" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.537, + 0.915, + 0.702 + ], + "angle": 0, + "content": "In the last section, we presented a system capable of effectively capturing and visualizing the factors influencing the predicted performance of ads in terms of potential CTR. However, in domains such as performance marketing decisions for choosing specific creatives for campaigns often need to be made under tight deadlines, sometimes literally in a few hours or even minutes. Moreover, these industries are characterized by large volumes of creative assets and a multitude of promotions simultaneously conducted by competitors in an \"always-on\" manner. Therefore, one cannot run detailed analysis for every ad, and there is a dire need for further automated analytical tools that would enable human marketers to rapidly comprehend available data and information." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.703, + 0.915, + 0.841 + ], + "angle": 0, + "content": "In order to address this challenge, we present an extension to our framework with a novel approach that leverages large language models (LLMs) to provide additional insights into the data and CTR predictions, called SODA. 
We outline an analytical pipeline that incorporates LLM-based explanations and generations and demonstrate its practical applications through a real-world scenario involving four Singapore telecommunication companies. This part of our framework aims to enhance the interpretability and comprehension of the data, facilitating better-informed decision-making in these fast-paced and competitive industries." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.841, + 0.915, + 0.897 + ], + "angle": 0, + "content": "The general pipeline of our analysis is shown in Figure 5. First, we use an LLM to extract specific well-defined insights from input ads, such as the needs served by this ad, products being advertised, and more (see below); the insights can be stored as features in" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.525, + 0.088 + ], + "angle": 0, + "content": "Against Opacity: Explainable AI and Large Language Models for Effective Digital Advertising" + }, + { + "type": "header", + "bbox": [ + 0.626, + 0.076, + 0.913, + 0.087 + ], + "angle": 0, + "content": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada" + }, + { + "type": "image", + "bbox": [ + 0.099, + 0.104, + 0.331, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.1, + 0.553, + 0.329, + 0.581 + ], + "angle": 0, + "content": "Figure 5: General pipeline of our LLM-based analysis" + }, + { + "type": "image", + "bbox": [ + 0.353, + 0.104, + 0.923, + 0.243 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.542, + 0.248, + 0.734, + 0.263 + ], + "angle": 0, + "content": "Figure 6: Sample ad analysis" + }, + { + "type": "image", + "bbox": [ + 0.358, + 0.27, + 0.917, + 0.432 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.478, + 0.434, + 0.798, + 0.448 + ], + "angle": 0, + "content": "Figure 7: Sample brand persona analysis results" + }, + { + "type": "image", 
+ "bbox": [ + 0.358, + 0.454, + 0.917, + 0.566 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.464, + 0.569, + 0.813, + 0.583 + ], + "angle": 0, + "content": "Figure 8: Sample brand comparative analysis results" + }, + { + "type": "image", + "bbox": [ + 0.085, + 0.587, + 0.915, + 0.757 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.332, + 0.761, + 0.665, + 0.776 + ], + "angle": 0, + "content": "Figure 9: Sample user persona generation results." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.785, + 0.484, + 0.897 + ], + "angle": 0, + "content": "tabular form. Then, we use these features together with further engineered prompts to perform generalizing analysis of a brand's target audiences, personas, needs, and insights expressed by the ads, tone, and topical categories of the current campaign and others. The resulting coverage of the campaign closely reflects campaign analysis commonly performed by marketing professionals and can be further used to tune the brand's message, tone, target audiences, personas, and more. The pipeline is also able to present" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.785, + 0.916, + 0.826 + ], + "angle": 0, + "content": "specific examples helpful for marketing professionals, such as sample (imagined) user profiles or user personas, which are also one of the common marketing tools. Let us dive into some details." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.827, + 0.915, + 0.897 + ], + "angle": 0, + "content": "Figure 6 shows sample results of our initial experiments on ad analysis. We selected batches of ads from the Facebook Ad Library for the same brand and processed them with an LLM, customized only with natural language prompt engineering. 
As a result, the LLM has been able to successfully identify key features of each" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.374, + 0.087 + ], + "angle": 0, + "content": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada" + }, + { + "type": "header", + "bbox": [ + 0.532, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "Qi Yang, Marlo Ongpin, Sergey Nikolenko, Alfred Huang, and Aleksandr Farseev" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.107, + 0.482, + 0.232 + ], + "angle": 0, + "content": "advertisement, including excellent responses to such seemingly \"human\" questions as identifying the human need, human insight, and the main archetypes used in an ad. Moreover, answers to most questions are standardized (as the LLM was instructed) and can be subject to automated processing. This kind of analysis has always been a key part of online marketing, and to the best of our knowledge, it has never been successfully automated and scaled up before. Such tasks had always required human labeling and thus had been restricted to a few sample ads rather than the entire dataset." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.232, + 0.482, + 0.356 + ], + "angle": 0, + "content": "As the next step, we use the ads and extracted features as inputs for a number of prompts asking to summarize information in a variety of formats commonly used in content marketing. We have seen successful summarization across the board, with important insights identified by the LLM and presented in an accessible and actionable format. Fig. 7 shows a sample result of our brand persona analysis, complete with main brand values used in the ad campaigns, the goals of using them, and detailed analysis of the primary \"caregiver\" persona, including supporting examples from the data." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.357, + 0.482, + 0.426 + ], + "angle": 0, + "content": "Figure 8 shows the results of a comparative analysis of four advertising campaigns run over the same time period by different brands. Again, the LLM has correctly identified its key distinguishing factors, and the list of differences is very similar to one that could be produced by a human marketing professional." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.426, + 0.483, + 0.633 + ], + "angle": 0, + "content": "Another avenue for using state-of-the-art generative AI capabilities that we have explored is user persona generation, an important tool in content marketing that has long proven to be useful for creative work[12, 14, 40]. To produce user personas, we begin with a list of interests (either extracted as shown in Fig. 9 or obtained from the client and/or social media platform) and prompt the LLM to give examples of user descriptions that could fit such interests. Fig. 7 shows a sample resulting user persona, which is fully believable to the professionals. To make the result even more tangible, we supplement such user personas with images generated by a state-of-the-art text-image model, in this case, Stable Diffusion [31]. To make the entire pipeline self-contained we ask the original LLM to also generate the prompt for the text-image model from the user persona description and a few examples of good prompts. The results also illustrated in Fig. 7, are very promising." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.633, + 0.483, + 0.731 + ], + "angle": 0, + "content": "The LLM used in all experiments was ChatGPT based on GPT-3.5 [27], and we believe that simply switching to more powerful LLMs such as GPT-4 [26] may lead to further increased performance across all applications. 
Note also that while GPT-3.5 can only process text ads, GPT-4 is already able to analyze images jointly with text (this ability has not yet been made public at the time of writing), which is arguably even more important for content marketing." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.753, + 0.225, + 0.767 + ], + "angle": 0, + "content": "4 CASE STUDY" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.771, + 0.483, + 0.897 + ], + "angle": 0, + "content": "To evaluate the practical value and viability of the proposed framework expansion using large language models (LLMs) for generating rapid insights and enabling prompt marketing-related decision-making, we have engaged 12 marketing professionals currently employed at marketing departments of Business-to-Consumer (B2C) brands or advertising and marketing agencies across Singapore, China, and the UK. These professionals were selected based on their extensive experience, averaging 9 years, in managing digital marketing campaigns across various industries." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.107, + 0.915, + 0.19 + ], + "angle": 0, + "content": "The professionals were presented with the details and results of our preliminary experiments analyzing and comparing the marketing campaigns of four major telecommunication companies in Singapore, as described in Section 3. They were then asked to provide their perspectives on the usefulness, quality, and potential impact of the insights and outputs generated by our framework." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.19, + 0.915, + 0.287 + ], + "angle": 0, + "content": "All 12 professionals responded very positively about the value of our approach. They found high-level overviews of brand positioning and audience targeting strategies, enriched with specific examples, to be highly useful to gain quick familiarity with brand messaging and inspire new creative directions. 
The generated user personas and accompanying AI-generated images were praised for bringing additional richness and tangibility to the insights." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.287, + 0.913, + 0.383 + ], + "angle": 0, + "content": "Several professionals commented that the coherent, standardized format of the outputs would allow for efficient processing and decision-making, especially given the tight timeframes frequently faced in the industry. More senior professionals have expressed that they foresee solutions like ours significantly augmenting and accelerating essential marketing functions through the automation of repetitive, labor-intensive tasks." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.384, + 0.915, + 0.537 + ], + "angle": 0, + "content": "This highly encouraging feedback from advertising and marketing professionals suggests strong potential business value in developing and applying AI-powered solutions, such as the proposed extension of our framework, for the automation of marketing campaign analysis and strategic planning. While adoption may face initial resistance, especially from very senior professionals, many in the industry seem poised to welcome AI augmenting and enhancing their work. Our approach, focused on mimicking established human processes and outputs, appears well-suited to addressing common pain points and unlocking new efficiencies, especially in such fast-paced domains as performance marketing." 
+ }, + { + "type": "title", + "bbox": [ + 0.515, + 0.568, + 0.665, + 0.582 + ], + "angle": 0, + "content": "5 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.586, + 0.915, + 0.807 + ], + "angle": 0, + "content": "In this work, we have presented a novel advertising analysis framework, called SODA, which amalgamates large language models, explainable artificial intelligence, and attention map visualization techniques, heralding a potential future of human-AI collaboration within the realm of digital advertising. Through the integration of LLMs and the incorporation of explainability aspects, our novel approach envisions enhanced efficiency and synergy between marketers and AI systems, hopefully leading to a new era of intelligent decision-making. We believe that our approach holds the promise of empowering a new generation of marketers to leverage advanced AI technologies effectively, fostering a deeper understanding of the underlying mechanisms driving ad performance and facilitating informed decision-making processes. Note that while we already show promising results, these are mostly preliminary experiments, and we strongly believe that this direction of research will bring many new advances in the nearest future." 
+ }, + { + "type": "title", + "bbox": [ + 0.514, + 0.85, + 0.742, + 0.863 + ], + "angle": 0, + "content": "6 ACKNOWLEDGEMENT" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.868, + 0.915, + 0.897 + ], + "angle": 0, + "content": "This work was funded by the Russian Science Foundation grant No 22-11-00135 https://rscf.ru/en/project/22-11-00135/" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.524, + 0.088 + ], + "angle": 0, + "content": "Against Opacity: Explainable AI and Large Language Models for Effective Digital Advertising" + }, + { + "type": "header", + "bbox": [ + 0.626, + 0.076, + 0.913, + 0.087 + ], + "angle": 0, + "content": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada" + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.106, + 0.203, + 0.12 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.124, + 0.482, + 0.143 + ], + "angle": 0, + "content": "[1] [n.d.]. Best practices to potentially reduce cost per result for Meta ads. https://www.facebook.com/business/help/321695409726523." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.145, + 0.483, + 0.163 + ], + "angle": 0, + "content": "[2] [n.d.]. Netflix Prize data. https://www.kaggle.com/datasets/netflix-inc/netflix-prize-data." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.164, + 0.482, + 0.203 + ], + "angle": 0, + "content": "[3] Anton Alekseev, Elena Tutubalina, Sejeong Kwon, and Sergey Nikolenko. 2022. Near-Zero-Shot Suggestion Mining with a Little Help from WordNet. In Analysis of Images, Social Networks and Texts. Springer International Publishing, Cham, 23-36." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.204, + 0.482, + 0.234 + ], + "angle": 0, + "content": "[4] J. Bergstra, R. Bardenet, Y. Bengio, and B. Kegl. 2011. Algorithms for HyperParameter Optimization. In Advances in Neural Information Processing Systems, Vol. 24. Curran Associates, Inc." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.235, + 0.482, + 0.254 + ], + "angle": 0, + "content": "[5] K Buraya, A Farseev, and A Filchenkov. 2018. Multi-view personality profiling based on longitudinal data. Lecture Notes in Computer Science 11018 (2018), 15-27." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.255, + 0.482, + 0.284 + ], + "angle": 0, + "content": "[6] Jiawei Chen, Hande Dong, Xiang Wang, Fuli Feng, Meng Wang, and Xiangnan He. 2023. Bias and Debias in Recommender System: A Survey and Future Directions. ACM Trans. Inf. Syst. 41, 3, Article 67 (feb 2023), 39 pages." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.285, + 0.482, + 0.334 + ], + "angle": 0, + "content": "[7] H.-T. Cheng, L. Koc, J. Harmsen, T. Shaked, T. Chandra, H. Aradhye, G. Anderson, G. Corrado, W. Chai, M. Ispir, R. Anil, Z. Haque, L. Hong, V. Jain, X. Liu, and H. Shah. 2016. Wide & Deep Learning for Recommender Systems. In Proc. 1st Workshop on Deep Learning for Recommender Systems (Boston, MA, USA) (DLRS 2016). ACM, New York, NY, USA, 7-10." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.335, + 0.482, + 0.384 + ], + "angle": 0, + "content": "[8] Alok Kumar Chowdhury, Aleksandr Farseev, Prithwi Raj Chakraborty, Dian Tjondronegoro, and Vinod Chandran. 2017. Automatic classification of physical exercises from wearable sensors using small dataset from non-laboratory settings. In 2017 IEEE Life Sciences Conference (LSC). 111-114. https://doi.org/10.1109/LSC.2017.8268156" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.385, + 0.482, + 0.415 + ], + "angle": 0, + "content": "[9] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proc. 2019 NAACL. ACL, 4171-4186." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.088, + 0.416, + 0.482, + 0.455 + ], + "angle": 0, + "content": "[10] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. 2020. An image is worth \\(16 \\times 16\\) words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.456, + 0.482, + 0.506 + ], + "angle": 0, + "content": "[11] Yali Du, Yinwei Wei, Wei Ji, Fan Liu, Xin Luo, and Liqiang Nie. 2023. Multi-Queue Momentum Contrast for Microvideo-Product Retrieval. In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining (Singapore, Singapore) (WSDM '23). ACM, New York, NY, USA, 1003–1011. https://doi.org/10.1145/3539597.3570405" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.507, + 0.482, + 0.555 + ], + "angle": 0, + "content": "[12] Aleksandr Farseev. 2023. Under the Hood of Social Media Advertising: How Do We Use AI Responsibly for Advertising Targeting and Creative Evaluation. In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining (Singapore, Singapore) (WSDM '23). ACM, New York, NY, USA, 1281-1282. https://doi.org/10.1145/3539597.3575791" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.557, + 0.482, + 0.586 + ], + "angle": 0, + "content": "[13] A Farseev, N Gukov, I Gossoudarev, and U Zarichnyak. 2014. Cross-platform online venue and user community recommendation based upon social networks data mining. Computer Instruments in Education 6 (2014), 28-38." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.587, + 0.482, + 0.636 + ], + "angle": 0, + "content": "[14] Aleksandr Farseev, Kirill Lepikhin, Hendrik Schwartz, Eu Khoon Ang, and Kenny Powar. 2018. SoMin.AI: Social Multimedia Influencer Discovery Marketplace. 
In Proceedings of the 26th ACM International Conference on Multimedia (Seoul, Republic of Korea) (MM '18). ACM, New York, NY, USA, 1234-1236. https://doi.org/10.1145/3240508.3241387" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.637, + 0.482, + 0.687 + ], + "angle": 0, + "content": "[15] Aleksandr Farseev, Qi Yang, Andrey Filchenkov, Kirill Lepikhin, Yu-Yi Chu-Farseeva, and Daron-Benjamin Loo. 2021. SoMin.Ai: Personality-Driven Content Generation Platform. In Proceedings of the 14th ACM International Conference on Web Search and Data Mining (WSDM '21). ACM, New York, NY, USA, 890-893. https://doi.org/10.1145/3437963.3441714" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.688, + 0.482, + 0.717 + ], + "angle": 0, + "content": "[16] H. Fukui, T. Hirakawa, T. Yamashita, and H. Fujiyoshi. 2019. Attention Branch Network: Learning of Attention Mechanism for Visual Explanation. Computer Vision and Pattern Recognition (2019), 10705-10714." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.718, + 0.482, + 0.747 + ], + "angle": 0, + "content": "[17] T. Ge, H. Liu, P. Yi, S. Huang, Z. Zhang, X. Zhu, Y. Zhang, K. Gai, L. Zhao, G. Zhou, K. Chen, S. Liu, H. Yi, Z. Hu, B. Liu, and P. Sun. 2018. Image Matters: Visually Modeling User Behaviors Using Advanced Model Server. 2087-2095." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.748, + 0.482, + 0.797 + ], + "angle": 0, + "content": "[18] Xiangnan He, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu, and Tat-Seng Chua. 2017. Neural Collaborative Filtering. In Proceedings of the 26th International Conference on World Wide Web (Perth, Australia) (WWW'17). International World Wide Web Conferences Steering Committee, Republic and Canton of Geneva, CHE, 173-182. https://doi.org/10.1145/3038912.3052569" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.798, + 0.482, + 0.838 + ], + "angle": 0, + "content": "[19] X. He, J. Pan, O. Jin, T. Xu, B. Liu, T. Xu, Y. Shi, A. Atallah, R. Herbrich, S. 
Bowers, and J. Q. Candela. 2014. Practical Lessons from Predicting Clicks on Ads at Facebook. In Proc. 8th International Workshop on Data Mining for Online Advertising (ADKDD'14). ACM, 1-9." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.839, + 0.482, + 0.889 + ], + "angle": 0, + "content": "[20] Alfred Huang, Qi Yang, Sergey Nikolenko, Marlo Ongpin, Ilia Gossoudarev, Ngoc Yen Duong, Kirill Lepikhin, Sergey Vishnyakov, Yuyi Chu-Farseeva, and Aleksandr Farseev. 2023. SoCraft: Advertiser-Level Predictive Scoring for Creative Performance on Meta. In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining (Singapore, Singapore) (WSDM '23). ACM," + }, + { + "type": "list", + "bbox": [ + 0.087, + 0.124, + 0.483, + 0.889 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.11, + 0.888, + 0.12 + ], + "angle": 0, + "content": "New York, NY, USA, 1132-1135. https://doi.org/10.1145/3539597.3573032" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.121, + 0.913, + 0.14 + ], + "angle": 0, + "content": "[21] X. Huang, A. Khetan, M. Cvitkovic, and Z. Karnin. 2020. TabTransformer: Tabular Data Modeling Using Contextual Embeddings. (2020). arXiv:2012.06678 [cs.LG]" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.141, + 0.913, + 0.19 + ], + "angle": 0, + "content": "[22] Sergei Koltcov, Olessia Koltsova, and Sergey Nikolenko. 2014. Latent Dirichlet Allocation: Stability and Applications to Studies of User-Generated Content. In Proceedings of the 2014 ACM Conference on Web Science (Bloomington, Indiana, USA) (WebSci '14). ACM, New York, NY, USA, 161–165. https://doi.org/10.1145/2615569.2615680" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.191, + 0.913, + 0.231 + ], + "angle": 0, + "content": "[23] H. B. McMahan, G. Holt, D. Sculley, M. Young, D. Ebner, J. Grady, L. Nie, T. Phillips, E. Davydov, D. Golovin, S. Chikkerur, D. Liu, M. Wattenberg, A. M. Hrafinkelsson, T. 
Boulos, and J. Kubica. 2013. Ad Click Prediction: A View from the Trenches. In Proc. 19th ACM SIGKDD (KDD '13). ACM, 1222-1230." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.232, + 0.913, + 0.25 + ], + "angle": 0, + "content": "[24] Meta. 2023. Meta Blueprint. https://www.facebookblueprint.com/student/catalog Accessed on June 06, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.251, + 0.913, + 0.291 + ], + "angle": 0, + "content": "[25] Sergey Nikolenko. 2015. SVD-LDA: Topic Modeling for Full-Text Recommender Systems. In Advances in Artificial Intelligence and Its Applications, Odbulia Pichardo Lagunas, Oscar Herrera Alcantara, and Gustavo Arroyo Figueroa (Eds.). Springer International Publishing, Cham, 67-79." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.292, + 0.849, + 0.302 + ], + "angle": 0, + "content": "[26] OpenAI. 2023. GPT-4 Technical Report. arXiv:2303.08774 [cs.CL]" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.303, + 0.913, + 0.351 + ], + "angle": 0, + "content": "[27] Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob Hilton, Fraser Kelton, Luke Miller, Maddie Simens, Amanda Askell, Peter Welinder, Paul Christiano, Jan Leike, and Ryan Lowe. 2022. Training language models to follow instructions with human feedback. arXiv:2203.02155 [cs.CL]" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.352, + 0.913, + 0.381 + ], + "angle": 0, + "content": "[28] Wentao Ouyang, Xiwu Zhang, Shukui Ren, Chao Qi, Zhaojie Liu, and Yanlong Du. 2019. Representation Learning-Assisted Click-Through Rate Prediction. In Proc. 28th IJCAI, 4561-4567. https://doi.org/10.24963/ijcai.2019/634" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.382, + 0.913, + 0.401 + ], + "angle": 0, + "content": "[29] Francesco Ricci, Lior Rokach, Bracha Shapira, and Paul B. Kantor. 2010. Recommender Systems Handbook (1st ed.). 
Springer-Verlag, Berlin, Heidelberg." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.402, + 0.913, + 0.431 + ], + "angle": 0, + "content": "[30] Matthew Richardson, Ewa Dominowska, and Robert Ragno. 2007. Predicting Clicks: Estimating the Click-through Rate for New Ads. In Proc. 16th WWWW (WWW '07), ACM, 521-530." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.432, + 0.913, + 0.471 + ], + "angle": 0, + "content": "[31] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. 2022. High-Resolution Image Synthesis With Latent Diffusion Models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). 10684-10695." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.473, + 0.913, + 0.532 + ], + "angle": 0, + "content": "[32] Andrey Savchenko, Anton Alekseev, Sejeong Kwon, Elena Tutubalina, Evgeny Myasnikov, and Sergey Nikolenko. 2020. Ad Lingua: Text Classification Improves Symbolism Prediction in Image Advertisements. In Proceedings of the 28th International Conference on Computational Linguistics. International Committee on Computational Linguistics, Barcelona, Spain (Online), 1886-1892. https://doi.org/10.18653/v1/2020.coling-main.171" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.533, + 0.913, + 0.582 + ], + "angle": 0, + "content": "[33] Ilya Shenbin, Anton Alekseev, Elena Tutubalina, Valentin Malykh, and Sergey I. Nikolenko. 2020. RecVAE: A New Variational Autoencoder for Top-N Recommendations with Implicit Feedback. In Proceedings of the 13th International Conference on Web Search and Data Mining (Houston, TX, USA) (WSDM '20). ACM, New York, NY, USA, 528-536. https://doi.org/10.1145/3336191.3371831" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.583, + 0.913, + 0.612 + ], + "angle": 0, + "content": "[34] Elena Tutubalina and Sergey I. Nikolenko. 2017. Demographic Prediction based on User Reviews about Medications. Computación y sistemas 21, 2 (2017), 227-241." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.613, + 0.913, + 0.643 + ], + "angle": 0, + "content": "[35] Elena Tutubalina and Sergey I. Nikolenko. 2018. Exploring convolutional neural networks and topic models for user profiling from drug reviews. Multimedia Tools and Applications 77, 4 (2018), 4791-4809." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.644, + 0.913, + 0.693 + ], + "angle": 0, + "content": "[36] Wenjie Wang, Fuli Feng, Liqiang Nie, and Tat-Seng Chua. 2022. User-Controllable Recommendation Against Filter Bubbles. In Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval (Madrid, Spain) (SIGIR '22). ACM, New York, NY, USA, 1251-1261. https://doi.org/10.1145/3477495.3532075" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.694, + 0.913, + 0.744 + ], + "angle": 0, + "content": "[37] Qi Yang, Aleksandr Farseev, and Andrey Filchenkov. 2021. Two-Faced Humans on Twitter and Facebook: Harvesting Social Multimedia for Human Personality Profiling. In Proceedings of the 2021 Workshop on Intelligent Cross-Data Analysis and Retrieval (Taipei, Taiwan) (ICDAR '21). ACM, New York, NY, USA, 39-47. https://doi.org/10.1145/3463944.3469270" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.745, + 0.913, + 0.784 + ], + "angle": 0, + "content": "[38] Qi Yang, Aleksandr Farseev, Sergey Nikolenko, and Andrey Filchenkov. 2022. Do we behave differently on Twitter and Facebook: Multi-view social network user personality profiling for content recommendation. Frontiers in Big Data 5 (2022). https://doi.org/10.3389/fdata.2022.931206" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.785, + 0.913, + 0.834 + ], + "angle": 0, + "content": "[39] Qi Yang, Sergey Nikolenko, Alfred Huang, and Aleksandr Farseev. 2022. Personality-Driven Social Multimedia Content Recommendation. In Proceedings of the 30th ACM International Conference on Multimedia (Lisboa, Portugal) (MM '22). 
ACM, New York, NY, USA, 7290-7299. https://doi.org/10.1145/3503161.3548769" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.835, + 0.913, + 0.885 + ], + "angle": 0, + "content": "[40] Qi Yang, Christos Tzelepis, Sergey Nikolenko, Ioannis Patras, and Aleksandr Farseev. 2023. \"Just To See You Smile\": SMILEY, a Voice-Guided GUY GAN. In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining (Singapore, Singapore) (WSDM '23). ACM, New York, NY, USA, 1196-1199. https://doi.org/10.1145/3539597.3573031" + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.11, + 0.913, + 0.885 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_20xxx/2504.20064/3484c7dd-5472-4f2c-ab8a-d4410cc59cb3_origin.pdf b/data/2025/2504_20xxx/2504.20064/3484c7dd-5472-4f2c-ab8a-d4410cc59cb3_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..8a4b7f4ba9d5c236ee4a60ab8c4e31e2060efd4c --- /dev/null +++ b/data/2025/2504_20xxx/2504.20064/3484c7dd-5472-4f2c-ab8a-d4410cc59cb3_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:094a2129ddccf43f7eb18c22f55878e830edf921cdb95cf7389e486ca2a05862 +size 14379076 diff --git a/data/2025/2504_20xxx/2504.20064/full.md b/data/2025/2504_20xxx/2504.20064/full.md new file mode 100644 index 0000000000000000000000000000000000000000..cd585a6572e538b0923d0dd989eeb26ccde185af --- /dev/null +++ b/data/2025/2504_20xxx/2504.20064/full.md @@ -0,0 +1,253 @@ +# Against Opacity: Explainable AI and Large Language Models for Effective Digital Advertising + +Qi Yang + +yangqi@itmo.ru + +ITMO University + +Saint Petersburg, Russia + +Marlo Ongpin + +marlo@somin.ai + +SoMin.ai Research + +Singapore, Singapore + +Sergey Nikolenko + +sergey@logic.pdmi.ras.ru + +ITMO University + +Steklov Institute of Mathematics + +Saint Petersburg, Russia + +Alfred Huang + +alfred@somin.ai + +SoMin.ai Research + +Singapore, Singapore + +Aleksandr Farseev + 
+sasha@somin.ai + +SoMin.ai Research + +Singapore, Singapore + +![](images/a4bbad9354d0c96540ac46d8b2ae9e19f85349fb003e3dde95f5c9f708a55630.jpg) +Figure 1: Overview of the LLM-based advertising analysis framework SODA (Section 3). + +# ABSTRACT + +The opaqueness of modern digital advertising, exemplified by platforms such as Meta Ads, raises concerns regarding their autonomous control over audience targeting, pricing structures, and ad relevancy assessments. Locked in their leading positions by network effects, "Metas and Googles of the world" attract countless advertisers who rely on intuition, with billions of dollars lost on ineffective social media ads. The platforms' algorithms use huge amounts of data unavailable to advertisers, and the algorithms themselves are opaque as well. This lack of transparency hinders the advertisers' ability to make informed decisions and necessitates efforts to promote transparency, standardize industry metrics, and strengthen regulatory frameworks. In this work, we propose novel ways to assist marketers in optimizing their advertising strategies via machine learning techniques designed to analyze and evaluate content, in particular, predict the click-through rates (CTR) of novel advertising content. Another important problem is that large volumes of data available in the competitive landscape, e.g., competitors' ads, + +Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org. 
+ +MM '23, October 29-November 3, 2023, Ottawa, ON, Canada + +© 2023 Copyright held by the owner/author(s). Publication rights licensed to ACM. + +ACM ISBN 979-8-4007-0108-5/23/10...$15.00 + +https://doi.org/10.1145/3581783.3612817 + +impede the ability of marketers to derive meaningful insights. This leads to a pressing need for a novel approach that would allow us to summarize and comprehend complex data. Inspired by the success of ChatGPT in bridging the gap between large language models (LLMs) and a broader non-technical audience, we propose a novel system that facilitates marketers in data interpretation, called SODA, that merges LLMs with explainable AI, enabling better human-AI collaboration with an emphasis on the domain of digital marketing and advertising. By combining LLMs and explainability features, in particular modern text-image models, we aim to improve the synergy between human marketers and AI systems. + +# CCS CONCEPTS + +- Information systems $\rightarrow$ Learning to rank; Multimedia and multimodal retrieval; Computational advertising; Multimedia and multimodal retrieval; Computational advertising. + +# KEYWORDS + +Digital Advertising, Ads Performance Prediction, Deep Learning, Large Language Model, Explainable AI + +# ACM Reference Format: + +Qi Yang, Marlo Ongpin, Sergey Nikolenko, Alfred Huang, and Aleksandr Farseev. 2023. Against Opacity: Explanable AI and Large Language Models for Effective Digital Advertising. In Proceedings of the 31st ACM International Conference on Multimedia (MM '23), October 29-November 3, 2023, Ottawa, ON, Canada. ACM, New York, NY, USA, 7 pages. https://doi.org/10.1145/3581783.3612817 + +# 1 INTRODUCTION + +The online advertising industry is the poster child of data science. 
Google and Facebook became industry-dominating behemoths to a large extent because they excelled at crunching the numbers and showing the best online ads to their primary assets, user audiences, while Amazon did the same for item recommendations in its online store. In academia, the Netflix Prize Competition [2] devoted to movie recommendations was one of the first open competitions with serious prizes and organization, a pioneer that would eventually lead to Kaggle and innumerable open leaderboards that nowadays track the state of the art in virtually every measurable ML task. The Netflix Prize itself has led to significant breakthroughs in collaborative filtering, and its dataset is still used as one of the standard benchmarks [2]. One definitely cannot say that the field of recommender systems, in particular online advertising, lacks the attention of machine learning researchers, and many important advances keep being made every month [6, 11, 18, 29, 33, 36-38]. + +However, most advances are being made on the side of the platforms (ad marketplaces) such as Facebook (Meta) [19], Google [23], Alibaba [28], or Taobao [17], and therefore they are not accessible to the advertising platform users, i.e., digital marketers. Collaborative filtering datasets are understandably private, and marketing professionals that create advertising content do not have access to the data needed to predict their own future performance. Note that these predictions are often self-fulfilling: if, e.g., Meta models predict low click-through ratio (CTR) for your ad, Meta will charge you more for showing it, probably show it less, and the campaign will likely be a failure regardless of how accurate the CTR prediction has been [1]. Often, there is no practical way to control the cost of advertising; technically, if a platform decided to charge more money for an ad nothing could prevent them from doing so. 
+ +Moreover, even if marketing professionals could run the corresponding models, that would only be of modest help with their job, which is content creation. Suppose that a model tells you that your new ad is a bad match for your audience, and the expected CTR is low. How do you fix that? It cannot be a pure collaborative filtering model since it has to predict CTR for a new ad that has not been shown to users yet, but it is still an opaque model that maps your ad content into a latent representation via "giant inscrutable matrices". So all you can do even if you have such a model is to try and make a different ad, get a new prediction, and work via trial and error. + +One potential way to address this issue involves visualizing the decision-making process of a neural network, providing marketers with insights into the rationale behind specific predictions made by AI models [3, 15, 20, 39]. Therefore, our first contribution in this work is a new variation of a state-of-the-art CTR prediction model coupled with a mechanism for analyzing the ad images (banners) via an image attention mechanism. The results provide human-understandable analysis that can be turned into actionable insights. + +However, this is only the beginning. Individual ad analysis via explainable ML models has proven beneficial in scenarios such as individual content evaluation prior to starting an advertising campaign, but it is much less practical when applied to large volumes of images and text ads in real-world settings. The time constraints faced by marketers impede their ability to effectively process and extract key content traits in their own advertising practices. + +In our opinion, the long-awaited revolution in digital advertising and content marketing will occur when both the ads themselves and the results of opaque models can be explained in ways that are both understandable for humans and actionable in terms of business results. 
We believe that the time for this revolution is now, and in this work we show that large language models such as GPT-3.5 [27] and GPT-4 [26] are already increasingly able to explain the "reasoning" behind recommender models and provide aggregate insights about advertising campaigns consisting of hundreds of individual ads. Prior to LLMs, approaches to aggregate text corpora in the context of recommender systems had been proposed via topic modeling [22, 25], sometimes coupled with deep learning [35] and user profiling [5, 34], but topic modeling is based on the bag-of-words assumption and cannot summarize text as an LLM does; visual understanding of ads had also been explored with convolutional networks [32]. + +Therefore, our main contribution is that we present preliminary results for a road-map that could achieve this holy grail of content marketing: provide explainable, actionable insights into advertising content along with possible strategies for improvement with models that could work on the side of a small advertising agency rather than a huge platform. We begin with direct CTR prediction and then proceed to provide explainable insights and content recommendations with large language models and even visual generative AI (see Fig. 1). + +The paper is organized as follows: in Section 2, we present an improved model for CTR prediction and visualization procedures for advertising banners, Section 3 introduces our approach to explainable ad analysis with large language models, Section 4 shows the results of a case study that confirms the effectiveness of our approach, and Section 5 concludes the paper. + +# 2 EXPLAINING OPAQUE AI WITH AI: CTR PREDICTION AND VISUALIZATIONS + +The lack of transparency within the advertising sector has been widely acknowledged as a primary reason for the inefficient allocation of advertising budgets. 
Notably, the responsibility for determining the cost per 1,000 impressions (CPM) and selecting competing entities in a programmatic auction rests primarily with the platform (we will use Meta as the running example). This decision-making process is in fact a result of numerous intricately interwoven machine learning (ML) models designed to dynamically match content with precise targeting criteria and individualized user profiles on Meta. These models are instrumental in estimating the likelihood of a user engaging in specific actions within the Meta ecosystem. + +As an illustration, consider a hypothetical Meta user named Simon who is anticipated to click on an ad (perform the "Click" action) with the slogan "Up your game nights with an ultra-immersive setup" displayed on a Meta Ad banner showcasing Singtel, a mobile operator company, and their home internet broadband product (Fig. 2). This prediction is done by Meta's internal ML models, and quite often contradicts Meta's widely publicized "best practices" blueprints [24]. Here, it is crucial to acknowledge the additional information that advertising engines such as Meta take into account. They are free to use factors such as Simon's past visits to telecom websites, pictures showing computer games in Simon's account + +![](images/c4b3bb86fba7bd6e20ae39746b404a55331d25eab19f268ee82422c44c38dc6a.jpg) +Figure 2: A sample advertising banner on Meta. + +on Meta, and much more. Moreover, these factors include Meta's own revenue considerations, prediction of the ad's "relevance" by Meta itself, timing of displaying this ad during the day, recency of the ad account (to incentivize new advertisers with improved performance), and the internal "ranking" of advertisers based on their history of disapproved ads, a process overseen by Meta. Regrettably, these predictive estimations are further influenced by the accuracy of Meta's ML models that profile Simon's content. 
For instance, when Simon is observed putting a diaper on his child, Meta's object recognition system might mistakenly associate it with an "Inflatable Boat / Fishing" interest; this is a real-life incident on the Meta platform, and such mistakes compound into suboptimal ad-related predictions down the line. + +Confronted with numerous intricate technical hurdles, digital marketers, who frequently lack technical expertise, often resort to intuitive judgment or a trial-and-error methodology in formulating and examining their creative assets within digital advertising platforms. Thus, it becomes especially important to have comprehensive data-driven guidance, not only for optimizing outcomes but also for developing cost-effective practices. + +One classical approach to providing this kind of guidance is to train an ML framework to predict the prospective performance of an advertising banner before allocating actual advertising budgets. In this section, we focus on the prediction of the click-through rate (CTR) metric, known to be closely associated with ad performance, particularly in the context of awareness and traffic advertising objectives. We used the recently presented SoWide model [20] as a sample state-of-the-art CTR prediction approach; its architecture is shown in Fig. 3. We updated the architecture slightly by replacing the ABN model for image processing with a Vision Transformer (ViT) [10], resulting in performance improvements, so we call it SoWide-v2. + +Unlike conventional supervised learning, where a data point $(\mathbf{x},y)$ consists of both feature vector $\mathbf{x}$ and target variable $y$ , the SoWide-v2 approach incorporates data from the campaign, ad set, and potentially multiple creatives to construct the features for each ad. 
Data points in the model leverage text and images from all creatives together with their respective estimated performances; in case of videos, we extract keyframes to obtain multiple distinct images included as additional training data. Furthermore, we extract low-level features from tabular, textual, and visual content, resulting + +![](images/112cfbfecb1cc833ca3409d9807c516bac7c5d357b277dc9d792da4090557401.jpg) +Figure 3: SoWide-v2 architecture + +in a comprehensive dataset that can be used to train a model capable of predicting content performance based on information from multiple modalities. After preprocessing, extracted features serve as inputs for the click-through rate (CTR) prediction model. SoWidev2 makes the assumption that the performance of advertisements converges to an underlying global distribution [8, 13, 30], so we normalize CTR values into categorical representations. Predicted scores indicate whether the content can be classified as "below average", "average", or "above average" in terms of quality. + +In essence, SoWide-v2 is a neural network based on the "wide and deep models" approach well known in recommender systems [7]. To facilitate representation learning for multimodal content, SoWide-v2 employs separate embedding layers and fully connected layers for each set of features. This process allows it to project sparse, high-dimensional, and low-level features into higher-level representations. To handle each modality appropriately, SoWide-v2 employs distinct deep models for feature processing. Specifically, it uses the TabTransformer [21] for tabular features and multilingual BERT [9] for textual content; the original SoWide used the attention branch network [16] for images but for SoWide-v2 we replaced it with a Vision Transformer [10]. Additionally, a fully connected layer is utilized to project the sparse high-dimensional features into a denser low-dimensional representation. 
These representations are subsequently concatenated and fed into another fully connected layer, followed by a softmax function for CTR classification, facilitating end-to-end joint learning. The model is trained using stochastic gradient descent (SGD) for 100 epochs, and hyper-parameter optimization is performed with the tree-structured Parzen estimator [4]. + +For evaluation results, we use the same datasets and baselines as the original SoWide paper [20], comparing the performance of SoWide-v2 against the original SoWide and several conventional machine learning baselines (there appears to be no previous work on CTR prediction before [20] that could be used for a direct comparison) using the F1-score, a widely used classification metric. Evaluation is done in two different settings: for general ad campaigns and also specifically for campaigns targeting the "Conversion" objective, which represents the two most prevalent and significant ad campaign objectives. The results shown in Table 1 demonstrate that the SoWide-v2 model presents an improvement over the original SoWide, and both models significantly outperform all classical ML baselines. Notably, the F1-score for the general ad campaigns reaches 0.78, + +Table 1: F1-score evaluation for CTR prediction models. + +
| Model | All | Conversion |
| --- | --- | --- |
| k-nearest neighbors | 0.338 | 0.254 |
| Random forest | 0.302 | 0.293 |
| Gradient boosting | 0.349 | 0.262 |
| AdaBoost | 0.289 | 0.277 |
| Multilayer perceptron | 0.654 | 0.642 |
| SoWide | 0.702 | 0.660 |
| SoWide-v2 | 0.780 | 0.671 |
+ +# Content Analysis + +Average Predicted Score Low + +$\mathbb{N}^2$ of Ads: 16 + +$\mathsf{N}^{\mathsf{g}}$ of Creatives: 25 + +![](images/acf1d5dc66621283575e40bafc9c60cf3bb0955f77888301abc3921b2143422b.jpg) +Figure 4: Sample predicted low-CTR and high-CTR advertising banners and heatmap visualizations of the attention layers involved in the prediction. + +![](images/4ffae6c66addebde4791fb51721097dd80f7180f5efcc93a3f6e017e285a8ce0.jpg) + +![](images/4bd9708227b626fdfcf010ad44a602d119731127edb8f5742a99ca7b77df091b.jpg) + +![](images/1eaf6b5d2722b766c5e99af3c555f3538205c698b313ac8e7426daef3cae5cd9.jpg) + +which confirms that the SoWide-v2 approach effectively accommodates the hierarchical structure inherent in advertising data, enabling effective multimodal learning for the prediction of ad performance. The results validate that SoWide-v2 is a state-of-the-art CTR prediction model. + +Thus far, we have introduced a framework that enables advertisers to assess the potential performance of their own content, and potentially that of their competitors, prior to its launch. This represents a valuable tactical capability that had been unavailable to the community for a long time. However, once a creative marketer gains access to the initial prediction results for a specific content piece, another significant challenge lies in comprehending the underlying factors that contribute to its success or failure. What went wrong, what was done right, and how do we amplify the right parts while suppressing the wrong parts? + +One approach to address this question would be to utilize various visualization techniques, specifically those that illustrate the decision-making process of the neural network while making a specific prediction. If the prediction is accurate, such visualizations are believed to provide insights into the underlying reasons behind the performance of a creative asset. 
Consequently, these visualizations can serve as a valuable resource for marketers in making informed decisions regarding the inclusion of specific components in future creative assets, enabling them to effectively communicate their requirements to the creative team. + +Figure 4 shows an illustrative example of such visualizations. The attention layers of the neural network used for CTR prediction are visualized as interactive heatmaps, revealing the specific regions of the banner that significantly influence the model's predictions. The figure shows that such attention visualization highlights the key elements within a Singtel banner (on the left) that contribute to its high predicted performance, namely gaming-related objects such as the monitor and the game controller. These elements effectively convey the message that a superior internet connection is essential for enhancing the gaming experience. Similarly, for the Circles.Life + +banner (on the right), the areas featuring the lady in the background were found to negatively impact its performance. This suggests that the composition and balance of the banner's visual elements, particularly in relation to the overall content creation practices, may have influenced its predicted low CTR values. + +# 3 EXPLAINING HUMANS FOR HUMANS: SODA, A LLM-BASED ADVERTISING ANALYSIS FRAMEWORK + +In the last section, we presented a system capable of effectively capturing and visualizing the factors influencing the predicted performance of ads in terms of potential CTR. However, in domains such as performance marketing decisions for choosing specific creatives for campaigns often need to be made under tight deadlines, sometimes literally in a few hours or even minutes. Moreover, these industries are characterized by large volumes of creative assets and a multitude of promotions simultaneously conducted by competitors in an "always-on" manner. 
Therefore, one cannot run detailed analysis for every ad, and there is a dire need for further automated analytical tools that would enable human marketers to rapidly comprehend available data and information. + +In order to address this challenge, we present an extension to our framework with a novel approach that leverages large language models (LLMs) to provide additional insights into the data and CTR predictions, called SODA. We outline an analytical pipeline that incorporates LLM-based explanations and generations and demonstrate its practical applications through a real-world scenario involving four Singapore telecommunication companies. This part of our framework aims to enhance the interpretability and comprehension of the data, facilitating better-informed decision-making in these fast-paced and competitive industries. + +The general pipeline of our analysis is shown in Figure 5. First, we use an LLM to extract specific well-defined insights from input ads, such as the needs served by this ad, products being advertised, and more (see below); the insights can be stored as features in + +![](images/40419723bf25fd8c75bdcd9f0b89e0e34c20da748523b3877ae518fe1cc38457.jpg) +Figure 5: General pipeline of our LLM-based analysis + +![](images/f8147ac1545644272b718d70da3125c80dd591c88d270d8ca53e061febc8d9b0.jpg) +Figure 6: Sample ad analysis + +![](images/78bab7b34c60a5323f9d775cdbd3ac401be8ac157a094161dd56c211689b0508.jpg) +Figure 7: Sample brand persona analysis results + +![](images/c3822a688e66daeb84d3b44532b2a359cfc5d46323c77f761cbffddcf11b66aa.jpg) + +![](images/b3e7aeef26d19972d49e9be7539552e1ba549f10a543d21e3e3e05c7533dc5e6.jpg) +Figure 8: Sample brand comparative analysis results +Figure 9: Sample user persona generation results. + +tabular form. 
Then, we use these features together with further engineered prompts to perform generalizing analysis of a brand's target audiences, personas, needs, and insights expressed by the ads, tone, and topical categories of the current campaign and others. The resulting coverage of the campaign closely reflects campaign analysis commonly performed by marketing professionals and can be further used to tune the brand's message, tone, target audiences, personas, and more. The pipeline is also able to present + +specific examples helpful for marketing professionals, such as sample (imagined) user profiles or user personas, which are also one of the common marketing tools. Let us dive into some details. + +Figure 6 shows sample results of our initial experiments on ad analysis. We selected batches of ads from the Facebook Ad Library for the same brand and processed them with an LLM, customized only with natural language prompt engineering. As a result, the LLM has been able to successfully identify key features of each + +advertisement, including excellent responses to such seemingly "human" questions as identifying the human need, human insight, and the main archetypes used in an ad. Moreover, answers to most questions are standardized (as the LLM was instructed) and can be subject to automated processing. This kind of analysis has always been a key part of online marketing, and to the best of our knowledge, it has never been successfully automated and scaled up before. Such tasks had always required human labeling and thus had been restricted to a few sample ads rather than the entire dataset. + +As the next step, we use the ads and extracted features as inputs for a number of prompts asking to summarize information in a variety of formats commonly used in content marketing. We have seen successful summarization across the board, with important insights identified by the LLM and presented in an accessible and actionable format. Fig. 
7 shows a sample result of our brand persona analysis, complete with main brand values used in the ad campaigns, the goals of using them, and detailed analysis of the primary "caregiver" persona, including supporting examples from the data. + +Figure 8 shows the results of a comparative analysis of four advertising campaigns run over the same time period by different brands. Again, the LLM has correctly identified its key distinguishing factors, and the list of differences is very similar to one that could be produced by a human marketing professional. + +Another avenue for using state-of-the-art generative AI capabilities that we have explored is user persona generation, an important tool in content marketing that has long proven to be useful for creative work [12, 14, 40]. To produce user personas, we begin with a list of interests (either extracted as shown in Fig. 9 or obtained from the client and/or social media platform) and prompt the LLM to give examples of user descriptions that could fit such interests. Fig. 7 shows a sample resulting user persona, which is fully believable to the professionals. To make the result even more tangible, we supplement such user personas with images generated by a state-of-the-art text-image model, in this case, Stable Diffusion [31]. To make the entire pipeline self-contained, we ask the original LLM to also generate the prompt for the text-image model from the user persona description and a few examples of good prompts. The results, also illustrated in Fig. 7, are very promising. + +The LLM used in all experiments was ChatGPT based on GPT-3.5 [27], and we believe that simply switching to more powerful LLMs such as GPT-4 [26] may lead to further increased performance across all applications. 
Note also that while GPT-3.5 can only process text ads, GPT-4 is already able to analyze images jointly with text (this ability has not yet been made public at the time of writing), which is arguably even more important for content marketing. + +# 4 CASE STUDY + +To evaluate the practical value and viability of the proposed framework expansion using large language models (LLMs) for generating rapid insights and enabling prompt marketing-related decision-making, we have engaged 12 marketing professionals currently employed at marketing departments of Business-to-Consumer (B2C) brands or advertising and marketing agencies across Singapore, China, and the UK. These professionals were selected based on their extensive experience, averaging 9 years, in managing digital marketing campaigns across various industries. + +The professionals were presented with the details and results of our preliminary experiments analyzing and comparing the marketing campaigns of four major telecommunication companies in Singapore, as described in Section 3. They were then asked to provide their perspectives on the usefulness, quality, and potential impact of the insights and outputs generated by our framework. + +All 12 professionals responded very positively about the value of our approach. They found high-level overviews of brand positioning and audience targeting strategies, enriched with specific examples, to be highly useful to gain quick familiarity with brand messaging and inspire new creative directions. The generated user personas and accompanying AI-generated images were praised for bringing additional richness and tangibility to the insights. + +Several professionals commented that the coherent, standardized format of the outputs would allow for efficient processing and decision-making, especially given the tight timeframes frequently faced in the industry. 
More senior professionals have expressed that they foresee solutions like ours significantly augmenting and accelerating essential marketing functions through the automation of repetitive, labor-intensive tasks. + +This highly encouraging feedback from advertising and marketing professionals suggests strong potential business value in developing and applying AI-powered solutions, such as the proposed extension of our framework, for the automation of marketing campaign analysis and strategic planning. While adoption may face initial resistance, especially from very senior professionals, many in the industry seem poised to welcome AI augmenting and enhancing their work. Our approach, focused on mimicking established human processes and outputs, appears well-suited to addressing common pain points and unlocking new efficiencies, especially in such fast-paced domains as performance marketing. + +# 5 CONCLUSION + +In this work, we have presented a novel advertising analysis framework, called SODA, which amalgamates large language models, explainable artificial intelligence, and attention map visualization techniques, heralding a potential future of human-AI collaboration within the realm of digital advertising. Through the integration of LLMs and the incorporation of explainability aspects, our novel approach envisions enhanced efficiency and synergy between marketers and AI systems, hopefully leading to a new era of intelligent decision-making. We believe that our approach holds the promise of empowering a new generation of marketers to leverage advanced AI technologies effectively, fostering a deeper understanding of the underlying mechanisms driving ad performance and facilitating informed decision-making processes. Note that while we already show promising results, these are mostly preliminary experiments, and we strongly believe that this direction of research will bring many new advances in the nearest future. 
+ +# 6 ACKNOWLEDGEMENT + +This work was funded by the Russian Science Foundation grant No 22-11-00135 https://rscf.ru/en/project/22-11-00135/ + +# REFERENCES + +[1] [n.d.]. Best practices to potentially reduce cost per result for Meta ads. https://www.facebook.com/business/help/321695409726523. +[2] [n.d.]. Netflix Prize data. https://www.kaggle.com/datasets/netflix-inc/netflix-prize-data. +[3] Anton Alekseev, Elena Tutubalina, Sejeong Kwon, and Sergey Nikolenko. 2022. Near-Zero-Shot Suggestion Mining with a Little Help from WordNet. In Analysis of Images, Social Networks and Texts. Springer International Publishing, Cham, 23-36. +[4] J. Bergstra, R. Bardenet, Y. Bengio, and B. Kegl. 2011. Algorithms for HyperParameter Optimization. In Advances in Neural Information Processing Systems, Vol. 24. Curran Associates, Inc. +[5] K Buraya, A Farseev, and A Filchenkov. 2018. Multi-view personality profiling based on longitudinal data. Lecture Notes in Computer Science 11018 (2018), 15-27. +[6] Jiawei Chen, Hande Dong, Xiang Wang, Fuli Feng, Meng Wang, and Xiangnan He. 2023. Bias and Debias in Recommender System: A Survey and Future Directions. ACM Trans. Inf. Syst. 41, 3, Article 67 (feb 2023), 39 pages. +[7] H.-T. Cheng, L. Koc, J. Harmsen, T. Shaked, T. Chandra, H. Aradhye, G. Anderson, G. Corrado, W. Chai, M. Ispir, R. Anil, Z. Haque, L. Hong, V. Jain, X. Liu, and H. Shah. 2016. Wide & Deep Learning for Recommender Systems. In Proc. 1st Workshop on Deep Learning for Recommender Systems (Boston, MA, USA) (DLRS 2016). ACM, New York, NY, USA, 7-10. +[8] Alok Kumar Chowdhury, Aleksandr Farseev, Prithwi Raj Chakraborty, Dian Tjondronegoro, and Vinod Chandran. 2017. Automatic classification of physical exercises from wearable sensors using small dataset from non-laboratory settings. In 2017 IEEE Life Sciences Conference (LSC). 111-114. https://doi.org/10.1109/LSC.2017.8268156 +[9] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. 
BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proc. 2019 NAACL. ACL, 4171-4186. +[10] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. 2020. An image is worth $16 \times 16$ words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020). +[11] Yali Du, Yinwei Wei, Wei Ji, Fan Liu, Xin Luo, and Liqiang Nie. 2023. Multi-Queue Momentum Contrast for Microvideo-Product Retrieval. In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining (Singapore, Singapore) (WSDM '23). ACM, New York, NY, USA, 1003–1011. https://doi.org/10.1145/3539597.3570405 +[12] Aleksandr Farseev. 2023. Under the Hood of Social Media Advertising: How Do We Use AI Responsibly for Advertising Targeting and Creative Evaluation. In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining (Singapore, Singapore) (WSDM '23). ACM, New York, NY, USA, 1281-1282. https://doi.org/10.1145/3539597.3575791 +[13] A Farseev, N Gukov, I Gossoudarev, and U Zarichnyak. 2014. Cross-platform online venue and user community recommendation based upon social networks data mining. Computer Instruments in Education 6 (2014), 28-38. +[14] Aleksandr Farseev, Kirill Lepikhin, Hendrik Schwartz, Eu Khoon Ang, and Kenny Powar. 2018. SoMin.AI: Social Multimedia Influencer Discovery Marketplace. In Proceedings of the 26th ACM International Conference on Multimedia (Seoul, Republic of Korea) (MM '18). ACM, New York, NY, USA, 1234-1236. https://doi.org/10.1145/3240508.3241387 +[15] Aleksandr Farseev, Qi Yang, Andrey Filchenkov, Kirill Lepikhin, Yu-Yi Chu-Farseeva, and Daron-Benjamin Loo. 2021. SoMin.Ai: Personality-Driven Content Generation Platform. In Proceedings of the 14th ACM International Conference on Web Search and Data Mining (WSDM '21). 
ACM, New York, NY, USA, 890-893. https://doi.org/10.1145/3437963.3441714 +[16] H. Fukui, T. Hirakawa, T. Yamashita, and H. Fujiyoshi. 2019. Attention Branch Network: Learning of Attention Mechanism for Visual Explanation. Computer Vision and Pattern Recognition (2019), 10705-10714. +[17] T. Ge, H. Liu, P. Yi, S. Huang, Z. Zhang, X. Zhu, Y. Zhang, K. Gai, L. Zhao, G. Zhou, K. Chen, S. Liu, H. Yi, Z. Hu, B. Liu, and P. Sun. 2018. Image Matters: Visually Modeling User Behaviors Using Advanced Model Server. 2087-2095. +[18] Xiangnan He, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu, and Tat-Seng Chua. 2017. Neural Collaborative Filtering. In Proceedings of the 26th International Conference on World Wide Web (Perth, Australia) (WWW'17). International World Wide Web Conferences Steering Committee, Republic and Canton of Geneva, CHE, 173-182. https://doi.org/10.1145/3038912.3052569 +[19] X. He, J. Pan, O. Jin, T. Xu, B. Liu, T. Xu, Y. Shi, A. Atallah, R. Herbrich, S. Bowers, and J. Q. Candela. 2014. Practical Lessons from Predicting Clicks on Ads at Facebook. In Proc. 8th International Workshop on Data Mining for Online Advertising (ADKDD'14). ACM, 1-9. +[20] Alfred Huang, Qi Yang, Sergey Nikolenko, Marlo Ongpin, Ilia Gossoudarev, Ngoc Yen Duong, Kirill Lepikhin, Sergey Vishnyakov, Yuyi Chu-Farseeva, and Aleksandr Farseev. 2023. SoCraft: Advertiser-Level Predictive Scoring for Creative Performance on Meta. In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining (Singapore, Singapore) (WSDM '23). ACM, + +New York, NY, USA, 1132-1135. https://doi.org/10.1145/3539597.3573032 +[21] X. Huang, A. Khetan, M. Cvitkovic, and Z. Karnin. 2020. TabTransformer: Tabular Data Modeling Using Contextual Embeddings. (2020). arXiv:2012.06678 [cs.LG] +[22] Sergei Koltcov, Olessia Koltsova, and Sergey Nikolenko. 2014. Latent Dirichlet Allocation: Stability and Applications to Studies of User-Generated Content. 
In Proceedings of the 2014 ACM Conference on Web Science (Bloomington, Indiana, USA) (WebSci '14). ACM, New York, NY, USA, 161–165. https://doi.org/10.1145/2615569.2615680 +[23] H. B. McMahan, G. Holt, D. Sculley, M. Young, D. Ebner, J. Grady, L. Nie, T. Phillips, E. Davydov, D. Golovin, S. Chikkerur, D. Liu, M. Wattenberg, A. M. Hrafinkelsson, T. Boulos, and J. Kubica. 2013. Ad Click Prediction: A View from the Trenches. In Proc. 19th ACM SIGKDD (KDD '13). ACM, 1222-1230. +[24] Meta. 2023. Meta Blueprint. https://www.facebookblueprint.com/student/catalog Accessed on June 06, 2023. +[25] Sergey Nikolenko. 2015. SVD-LDA: Topic Modeling for Full-Text Recommender Systems. In Advances in Artificial Intelligence and Its Applications, Odbulia Pichardo Lagunas, Oscar Herrera Alcantara, and Gustavo Arroyo Figueroa (Eds.). Springer International Publishing, Cham, 67-79. +[26] OpenAI. 2023. GPT-4 Technical Report. arXiv:2303.08774 [cs.CL] +[27] Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob Hilton, Fraser Kelton, Luke Miller, Maddie Simens, Amanda Askell, Peter Welinder, Paul Christiano, Jan Leike, and Ryan Lowe. 2022. Training language models to follow instructions with human feedback. arXiv:2203.02155 [cs.CL] +[28] Wentao Ouyang, Xiwu Zhang, Shukui Ren, Chao Qi, Zhaojie Liu, and Yanlong Du. 2019. Representation Learning-Assisted Click-Through Rate Prediction. In Proc. 28th IJCAI, 4561-4567. https://doi.org/10.24963/ijcai.2019/634 +[29] Francesco Ricci, Lior Rokach, Bracha Shapira, and Paul B. Kantor. 2010. Recommender Systems Handbook (1st ed.). Springer-Verlag, Berlin, Heidelberg. +[30] Matthew Richardson, Ewa Dominowska, and Robert Ragno. 2007. Predicting Clicks: Estimating the Click-through Rate for New Ads. In Proc. 16th WWWW (WWW '07), ACM, 521-530. +[31] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. 2022. 
High-Resolution Image Synthesis With Latent Diffusion Models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). 10684-10695. +[32] Andrey Savchenko, Anton Alekseev, Sejeong Kwon, Elena Tutubalina, Evgeny Myasnikov, and Sergey Nikolenko. 2020. Ad Lingua: Text Classification Improves Symbolism Prediction in Image Advertisements. In Proceedings of the 28th International Conference on Computational Linguistics. International Committee on Computational Linguistics, Barcelona, Spain (Online), 1886-1892. https://doi.org/10.18653/v1/2020.coling-main.171 +[33] Ilya Shenbin, Anton Alekseev, Elena Tutubalina, Valentin Malykh, and Sergey I. Nikolenko. 2020. RecVAE: A New Variational Autoencoder for Top-N Recommendations with Implicit Feedback. In Proceedings of the 13th International Conference on Web Search and Data Mining (Houston, TX, USA) (WSDM '20). ACM, New York, NY, USA, 528-536. https://doi.org/10.1145/3336191.3371831 +[34] Elena Tutubalina and Sergey I. Nikolenko. 2017. Demographic Prediction based on User Reviews about Medications. Computación y sistemas 21, 2 (2017), 227-241. +[35] Elena Tutubalina and Sergey I. Nikolenko. 2018. Exploring convolutional neural networks and topic models for user profiling from drug reviews. Multimedia Tools and Applications 77, 4 (2018), 4791-4809. +[36] Wenjie Wang, Fuli Feng, Liqiang Nie, and Tat-Seng Chua. 2022. User-Controllable Recommendation Against Filter Bubbles. In Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval (Madrid, Spain) (SIGIR '22). ACM, New York, NY, USA, 1251-1261. https://doi.org/10.1145/3477495.3532075 +[37] Qi Yang, Aleksandr Farseev, and Andrey Filchenkov. 2021. Two-Faced Humans on Twitter and Facebook: Harvesting Social Multimedia for Human Personality Profiling. In Proceedings of the 2021 Workshop on Intelligent Cross-Data Analysis and Retrieval (Taipei, Taiwan) (ICDAR '21). ACM, New York, NY, USA, 39-47. 
https://doi.org/10.1145/3463944.3469270 +[38] Qi Yang, Aleksandr Farseev, Sergey Nikolenko, and Andrey Filchenkov. 2022. Do we behave differently on Twitter and Facebook: Multi-view social network user personality profiling for content recommendation. Frontiers in Big Data 5 (2022). https://doi.org/10.3389/fdata.2022.931206 +[39] Qi Yang, Sergey Nikolenko, Alfred Huang, and Aleksandr Farseev. 2022. Personality-Driven Social Multimedia Content Recommendation. In Proceedings of the 30th ACM International Conference on Multimedia (Lisboa, Portugal) (MM '22). ACM, New York, NY, USA, 7290-7299. https://doi.org/10.1145/3503161.3548769 +[40] Qi Yang, Christos Tzelepis, Sergey Nikolenko, Ioannis Patras, and Aleksandr Farseev. 2023. "Just To See You Smile": SMILEY, a Voice-Guided GUY GAN. In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining (Singapore, Singapore) (WSDM '23). ACM, New York, NY, USA, 1196-1199. https://doi.org/10.1145/3539597.3573031 \ No newline at end of file diff --git a/data/2025/2504_20xxx/2504.20064/images/112cfbfecb1cc833ca3409d9807c516bac7c5d357b277dc9d792da4090557401.jpg b/data/2025/2504_20xxx/2504.20064/images/112cfbfecb1cc833ca3409d9807c516bac7c5d357b277dc9d792da4090557401.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe9f0e4968d801ef9127fa8d1b3b73526d061ac7 --- /dev/null +++ b/data/2025/2504_20xxx/2504.20064/images/112cfbfecb1cc833ca3409d9807c516bac7c5d357b277dc9d792da4090557401.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca4e425f22fb703161cd5cbd12717dc4405491064c72ba86a8e6255151e35a3d +size 43705 diff --git a/data/2025/2504_20xxx/2504.20064/images/1eaf6b5d2722b766c5e99af3c555f3538205c698b313ac8e7426daef3cae5cd9.jpg b/data/2025/2504_20xxx/2504.20064/images/1eaf6b5d2722b766c5e99af3c555f3538205c698b313ac8e7426daef3cae5cd9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..569785f97bae6201bba1f7f53106878cd0ec24e1 --- /dev/null +++ 
b/data/2025/2504_20xxx/2504.20064/images/1eaf6b5d2722b766c5e99af3c555f3538205c698b313ac8e7426daef3cae5cd9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e5936d8bd976c0a0b0e0acecc37dbb8349da55fadb7a0376a77b0f53f6a2ba0 +size 13083 diff --git a/data/2025/2504_20xxx/2504.20064/images/40419723bf25fd8c75bdcd9f0b89e0e34c20da748523b3877ae518fe1cc38457.jpg b/data/2025/2504_20xxx/2504.20064/images/40419723bf25fd8c75bdcd9f0b89e0e34c20da748523b3877ae518fe1cc38457.jpg new file mode 100644 index 0000000000000000000000000000000000000000..76cc686c7333daa5c6c22b958502241c5bfd19da --- /dev/null +++ b/data/2025/2504_20xxx/2504.20064/images/40419723bf25fd8c75bdcd9f0b89e0e34c20da748523b3877ae518fe1cc38457.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:969ff4fcce1b0be782d965fec873389ee828db10c23c9058a5011c9a45b106f5 +size 64918 diff --git a/data/2025/2504_20xxx/2504.20064/images/4bd9708227b626fdfcf010ad44a602d119731127edb8f5742a99ca7b77df091b.jpg b/data/2025/2504_20xxx/2504.20064/images/4bd9708227b626fdfcf010ad44a602d119731127edb8f5742a99ca7b77df091b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..91dcdd8103f51730adbe4e2521efa35f08404c55 --- /dev/null +++ b/data/2025/2504_20xxx/2504.20064/images/4bd9708227b626fdfcf010ad44a602d119731127edb8f5742a99ca7b77df091b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebf79e7beb486d574d8ea47bc02dbdb352b9b595ff758e7df6ec145a6b25e6e5 +size 16805 diff --git a/data/2025/2504_20xxx/2504.20064/images/4ffae6c66addebde4791fb51721097dd80f7180f5efcc93a3f6e017e285a8ce0.jpg b/data/2025/2504_20xxx/2504.20064/images/4ffae6c66addebde4791fb51721097dd80f7180f5efcc93a3f6e017e285a8ce0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6065c49771a67be1801594b943c9978571eed850 --- /dev/null +++ b/data/2025/2504_20xxx/2504.20064/images/4ffae6c66addebde4791fb51721097dd80f7180f5efcc93a3f6e017e285a8ce0.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:0c25b1719e3e33632594ecfabcf598fd6281728a385a8c28463714bb661af743 +size 14853 diff --git a/data/2025/2504_20xxx/2504.20064/images/78bab7b34c60a5323f9d775cdbd3ac401be8ac157a094161dd56c211689b0508.jpg b/data/2025/2504_20xxx/2504.20064/images/78bab7b34c60a5323f9d775cdbd3ac401be8ac157a094161dd56c211689b0508.jpg new file mode 100644 index 0000000000000000000000000000000000000000..26e003ba3416931061bb74aeb7523c0405bc4693 --- /dev/null +++ b/data/2025/2504_20xxx/2504.20064/images/78bab7b34c60a5323f9d775cdbd3ac401be8ac157a094161dd56c211689b0508.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67a068ed5ce26581dc188587843ac6b031f399b3f811be341cdc5b6d59c1a6ba +size 86261 diff --git a/data/2025/2504_20xxx/2504.20064/images/a4bbad9354d0c96540ac46d8b2ae9e19f85349fb003e3dde95f5c9f708a55630.jpg b/data/2025/2504_20xxx/2504.20064/images/a4bbad9354d0c96540ac46d8b2ae9e19f85349fb003e3dde95f5c9f708a55630.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd8267edbe9b7c00fd6ab5735e421fb3fa7dc190 --- /dev/null +++ b/data/2025/2504_20xxx/2504.20064/images/a4bbad9354d0c96540ac46d8b2ae9e19f85349fb003e3dde95f5c9f708a55630.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:68aaeeba8b3ecf767c5774287c6099fe3839546a20010406a68ab29bc0d1cb7f +size 74418 diff --git a/data/2025/2504_20xxx/2504.20064/images/acf1d5dc66621283575e40bafc9c60cf3bb0955f77888301abc3921b2143422b.jpg b/data/2025/2504_20xxx/2504.20064/images/acf1d5dc66621283575e40bafc9c60cf3bb0955f77888301abc3921b2143422b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d03a75603c52117d3d43fe2b05a5be3549927b7d --- /dev/null +++ b/data/2025/2504_20xxx/2504.20064/images/acf1d5dc66621283575e40bafc9c60cf3bb0955f77888301abc3921b2143422b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d82596c1a231ab5e14e641d3d5ed2c7de0dc37f7b6d0486b34957f3b90ced5e +size 18576 diff --git 
a/data/2025/2504_20xxx/2504.20064/images/b3e7aeef26d19972d49e9be7539552e1ba549f10a543d21e3e3e05c7533dc5e6.jpg b/data/2025/2504_20xxx/2504.20064/images/b3e7aeef26d19972d49e9be7539552e1ba549f10a543d21e3e3e05c7533dc5e6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9abe28a8efe14a1fe881065f1da5aa4bc338afda --- /dev/null +++ b/data/2025/2504_20xxx/2504.20064/images/b3e7aeef26d19972d49e9be7539552e1ba549f10a543d21e3e3e05c7533dc5e6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f989be6f96ef7899c8f43d98635f1c23ef53996ba9563fbe641bb0c59550f42 +size 96587 diff --git a/data/2025/2504_20xxx/2504.20064/images/c3822a688e66daeb84d3b44532b2a359cfc5d46323c77f761cbffddcf11b66aa.jpg b/data/2025/2504_20xxx/2504.20064/images/c3822a688e66daeb84d3b44532b2a359cfc5d46323c77f761cbffddcf11b66aa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fdfd3efe912f0e59e9e743ad23102cd18e7d7d2b --- /dev/null +++ b/data/2025/2504_20xxx/2504.20064/images/c3822a688e66daeb84d3b44532b2a359cfc5d46323c77f761cbffddcf11b66aa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47e1fee3bc7d443d93b04c3ad2ce4c198c1751568379425de81ea8c3baff4946 +size 57842 diff --git a/data/2025/2504_20xxx/2504.20064/images/c4b3bb86fba7bd6e20ae39746b404a55331d25eab19f268ee82422c44c38dc6a.jpg b/data/2025/2504_20xxx/2504.20064/images/c4b3bb86fba7bd6e20ae39746b404a55331d25eab19f268ee82422c44c38dc6a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..abaf4aef88c70ba03abd8a0623f85364a9837359 --- /dev/null +++ b/data/2025/2504_20xxx/2504.20064/images/c4b3bb86fba7bd6e20ae39746b404a55331d25eab19f268ee82422c44c38dc6a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07a86a965f1c161c08437cfc15a1e624c697f5141de58a983629d885c4f3b4fd +size 32898 diff --git a/data/2025/2504_20xxx/2504.20064/images/d81a1b7e2888f389fc9dce32e0582f52aaa9238833b2c0fb8e377e88c7f887e1.jpg 
b/data/2025/2504_20xxx/2504.20064/images/d81a1b7e2888f389fc9dce32e0582f52aaa9238833b2c0fb8e377e88c7f887e1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1a030f7b7b2cb60386c8eef90015b2e15855e83d --- /dev/null +++ b/data/2025/2504_20xxx/2504.20064/images/d81a1b7e2888f389fc9dce32e0582f52aaa9238833b2c0fb8e377e88c7f887e1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc2f24145c96829a6d35cfaa5818c24520bac20bbd4d16040d48838726d5f3d7 +size 23568 diff --git a/data/2025/2504_20xxx/2504.20064/images/f8147ac1545644272b718d70da3125c80dd591c88d270d8ca53e061febc8d9b0.jpg b/data/2025/2504_20xxx/2504.20064/images/f8147ac1545644272b718d70da3125c80dd591c88d270d8ca53e061febc8d9b0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6a01344d057c694ac7c9c9fbd24fa1fe7313da0f --- /dev/null +++ b/data/2025/2504_20xxx/2504.20064/images/f8147ac1545644272b718d70da3125c80dd591c88d270d8ca53e061febc8d9b0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:345ca6c84a901d00261edd7b0e4d2525aa54d272e42635f14658a88062d73b40 +size 68387 diff --git a/data/2025/2504_20xxx/2504.20064/layout.json b/data/2025/2504_20xxx/2504.20064/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..21ccc86e0c2703ac2227e468c66a9d83f0cdbb82 --- /dev/null +++ b/data/2025/2504_20xxx/2504.20064/layout.json @@ -0,0 +1,5800 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 53, + 79, + 557, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 79, + 557, + 120 + ], + "spans": [ + { + "bbox": [ + 53, + 79, + 557, + 120 + ], + "type": "text", + "content": "Against Opacity: Explainable AI and Large Language Models for Effective Digital Advertising" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 129, + 162, + 142 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 129, + 162, + 142 + ], + "spans": [ + { + "bbox": [ + 121, + 129, + 
162, + 142 + ], + "type": "text", + "content": "Qi Yang" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 143, + 175, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 143, + 175, + 154 + ], + "spans": [ + { + "bbox": [ + 107, + 143, + 175, + 154 + ], + "type": "text", + "content": "yangqi@itmo.ru" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 155, + 177, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 155, + 177, + 165 + ], + "spans": [ + { + "bbox": [ + 106, + 155, + 177, + 165 + ], + "type": "text", + "content": "ITMO University" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 91, + 167, + 191, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 167, + 191, + 178 + ], + "spans": [ + { + "bbox": [ + 91, + 167, + 191, + 178 + ], + "type": "text", + "content": "Saint Petersburg, Russia" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 270, + 129, + 341, + 142 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 270, + 129, + 341, + 142 + ], + "spans": [ + { + "bbox": [ + 270, + 129, + 341, + 142 + ], + "type": "text", + "content": "Marlo Ongpin" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 270, + 143, + 340, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 270, + 143, + 340, + 153 + ], + "spans": [ + { + "bbox": [ + 270, + 143, + 340, + 153 + ], + "type": "text", + "content": "marlo@somin.ai" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 268, + 155, + 343, + 164 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 268, + 155, + 343, + 164 + ], + "spans": [ + { + "bbox": [ + 268, + 155, + 343, + 164 + ], + "type": "text", + "content": "SoMin.ai Research" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 261, + 167, + 350, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 261, + 167, + 350, + 178 + ], + "spans": [ + { + "bbox": [ + 261, + 167, + 
350, + 178 + ], + "type": "text", + "content": "Singapore, Singapore" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 425, + 129, + 514, + 142 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 425, + 129, + 514, + 142 + ], + "spans": [ + { + "bbox": [ + 425, + 129, + 514, + 142 + ], + "type": "text", + "content": "Sergey Nikolenko" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 417, + 143, + 522, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 417, + 143, + 522, + 154 + ], + "spans": [ + { + "bbox": [ + 417, + 143, + 522, + 154 + ], + "type": "text", + "content": "sergey@logic.pdmi.ras.ru" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 434, + 155, + 506, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 434, + 155, + 506, + 166 + ], + "spans": [ + { + "bbox": [ + 434, + 155, + 506, + 166 + ], + "type": "text", + "content": "ITMO University" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 403, + 167, + 536, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 403, + 167, + 536, + 177 + ], + "spans": [ + { + "bbox": [ + 403, + 167, + 536, + 177 + ], + "type": "text", + "content": "Steklov Institute of Mathematics" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 420, + 178, + 519, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 420, + 178, + 519, + 190 + ], + "spans": [ + { + "bbox": [ + 420, + 178, + 519, + 190 + ], + "type": "text", + "content": "Saint Petersburg, Russia" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 188, + 199, + 258, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 199, + 258, + 213 + ], + "spans": [ + { + "bbox": [ + 188, + 199, + 258, + 213 + ], + "type": "text", + "content": "Alfred Huang" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 188, + 213, + 258, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 213, + 258, + 224 + 
], + "spans": [ + { + "bbox": [ + 188, + 213, + 258, + 224 + ], + "type": "text", + "content": "alfred@somin.ai" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 185, + 225, + 261, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 185, + 225, + 261, + 235 + ], + "spans": [ + { + "bbox": [ + 185, + 225, + 261, + 235 + ], + "type": "text", + "content": "SoMin.ai Research" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 179, + 237, + 267, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 237, + 267, + 248 + ], + "spans": [ + { + "bbox": [ + 179, + 237, + 267, + 248 + ], + "type": "text", + "content": "Singapore, Singapore" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 341, + 199, + 433, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 341, + 199, + 433, + 211 + ], + "spans": [ + { + "bbox": [ + 341, + 199, + 433, + 211 + ], + "type": "text", + "content": "Aleksandr Farseev" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 353, + 213, + 421, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 353, + 213, + 421, + 223 + ], + "spans": [ + { + "bbox": [ + 353, + 213, + 421, + 223 + ], + "type": "text", + "content": "sasha@somin.ai" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 348, + 225, + 425, + 235 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 348, + 225, + 425, + 235 + ], + "spans": [ + { + "bbox": [ + 348, + 225, + 425, + 235 + ], + "type": "text", + "content": "SoMin.ai Research" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 343, + 237, + 431, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 343, + 237, + 431, + 248 + ], + "spans": [ + { + "bbox": [ + 343, + 237, + 431, + 248 + ], + "type": "text", + "content": "Singapore, Singapore" + } + ] + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 76, + 261, + 533, + 381 + ], + "blocks": [ + { + "bbox": [ + 76, + 261, + 
533, + 381 + ], + "lines": [ + { + "bbox": [ + 76, + 261, + 533, + 381 + ], + "spans": [ + { + "bbox": [ + 76, + 261, + 533, + 381 + ], + "type": "image", + "image_path": "a4bbad9354d0c96540ac46d8b2ae9e19f85349fb003e3dde95f5c9f708a55630.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 126, + 393, + 482, + 405 + ], + "lines": [ + { + "bbox": [ + 126, + 393, + 482, + 405 + ], + "spans": [ + { + "bbox": [ + 126, + 393, + 482, + 405 + ], + "type": "text", + "content": "Figure 1: Overview of the LLM-based advertising analysis framework SODA (Section 3)." + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 23 + }, + { + "bbox": [ + 51, + 410, + 112, + 420 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 410, + 112, + 420 + ], + "spans": [ + { + "bbox": [ + 51, + 410, + 112, + 420 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 50, + 424, + 296, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 424, + 296, + 611 + ], + "spans": [ + { + "bbox": [ + 50, + 424, + 296, + 611 + ], + "type": "text", + "content": "The opaqueness of modern digital advertising, exemplified by platforms such as Meta Ads, raises concerns regarding their autonomous control over audience targeting, pricing structures, and ad relevancy assessments. Locked in their leading positions by network effects, \"Metas and Googles of the world\" attract countless advertisers who rely on intuition, with billions of dollars lost on ineffective social media ads. The platforms' algorithms use huge amounts of data unavailable to advertisers, and the algorithms themselves are opaque as well. This lack of transparency hinders the advertisers' ability to make informed decisions and necessitates efforts to promote transparency, standardize industry metrics, and strengthen regulatory frameworks. 
In this work, we propose novel ways to assist marketers in optimizing their advertising strategies via machine learning techniques designed to analyze and evaluate content, in particular, predict the click-through rates (CTR) of novel advertising content. Another important problem is that large volumes of data available in the competitive landscape, e.g., competitors' ads," + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 50, + 618, + 295, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 618, + 295, + 675 + ], + "spans": [ + { + "bbox": [ + 50, + 618, + 295, + 675 + ], + "type": "text", + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 52, + 676, + 219, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 676, + 219, + 684 + ], + "spans": [ + { + "bbox": [ + 52, + 676, + 219, + 684 + ], + "type": "text", + "content": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 52, + 685, + 289, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 685, + 289, + 693 + ], + "spans": [ + { + "bbox": [ + 52, + 685, + 289, + 693 + ], + "type": "text", + "content": "© 2023 Copyright held by the owner/author(s). Publication rights licensed to ACM." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 52, + 693, + 180, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 693, + 180, + 700 + ], + "spans": [ + { + "bbox": [ + 52, + 693, + 180, + 700 + ], + "type": "text", + "content": "ACM ISBN 979-8-4007-0108-5/23/10...$15.00" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 52, + 700, + 167, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 700, + 167, + 709 + ], + "spans": [ + { + "bbox": [ + 52, + 700, + 167, + 709 + ], + "type": "text", + "content": "https://doi.org/10.1145/3581783.3612817" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 313, + 411, + 560, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 411, + 560, + 532 + ], + "spans": [ + { + "bbox": [ + 313, + 411, + 560, + 532 + ], + "type": "text", + "content": "impede the ability of marketers to derive meaningful insights. This leads to a pressing need for a novel approach that would allow us to summarize and comprehend complex data. Inspired by the success of ChatGPT in bridging the gap between large language models (LLMs) and a broader non-technical audience, we propose a novel system that facilitates marketers in data interpretation, called SODA, that merges LLMs with explainable AI, enabling better human-AI collaboration with an emphasis on the domain of digital marketing and advertising. By combining LLMs and explainability features, in particular modern text-image models, we aim to improve the synergy between human marketers and AI systems." 
+ } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 315, + 540, + 399, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 540, + 399, + 552 + ], + "spans": [ + { + "bbox": [ + 315, + 540, + 399, + 552 + ], + "type": "text", + "content": "CCS CONCEPTS" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 314, + 555, + 560, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 555, + 560, + 589 + ], + "spans": [ + { + "bbox": [ + 314, + 555, + 560, + 589 + ], + "type": "text", + "content": "- Information systems " + }, + { + "bbox": [ + 314, + 555, + 560, + 589 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 314, + 555, + 560, + 589 + ], + "type": "text", + "content": " Learning to rank; Multimedia and multimodal retrieval; Computational advertising; Multimedia and multimodal retrieval; Computational advertising." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 315, + 598, + 380, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 598, + 380, + 609 + ], + "spans": [ + { + "bbox": [ + 315, + 598, + 380, + 609 + ], + "type": "text", + "content": "KEYWORDS" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 314, + 613, + 559, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 613, + 559, + 635 + ], + "spans": [ + { + "bbox": [ + 314, + 613, + 559, + 635 + ], + "type": "text", + "content": "Digital Advertising, Ads Performance Prediction, Deep Learning, Large Language Model, Explainable AI" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 315, + 639, + 405, + 648 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 639, + 405, + 648 + ], + "spans": [ + { + "bbox": [ + 315, + 639, + 405, + 648 + ], + "type": "text", + "content": "ACM Reference Format:" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 314, + 649, + 559, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + 
{ + "bbox": [ + 314, + 649, + 559, + 708 + ], + "spans": [ + { + "bbox": [ + 314, + 649, + 559, + 708 + ], + "type": "text", + "content": "Qi Yang, Marlo Ongpin, Sergey Nikolenko, Alfred Huang, and Aleksandr Farseev. 2023. Against Opacity: Explanable AI and Large Language Models for Effective Digital Advertising. In Proceedings of the 31st ACM International Conference on Multimedia (MM '23), October 29-November 3, 2023, Ottawa, ON, Canada. ACM, New York, NY, USA, 7 pages. https://doi.org/10.1145/3581783.3612817" + } + ] + } + ], + "index": 38 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 216, + 37, + 561 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 216, + 37, + 561 + ], + "spans": [ + { + "bbox": [ + 14, + 216, + 37, + 561 + ], + "type": "text", + "content": "arXiv:2504.20064v1 [cs.IR] 22 Apr 2025" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 83, + 157, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 83, + 157, + 95 + ], + "spans": [ + { + "bbox": [ + 52, + 83, + 157, + 95 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 99, + 294, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 99, + 294, + 274 + ], + "spans": [ + { + "bbox": [ + 50, + 99, + 294, + 274 + ], + "type": "text", + "content": "The online advertising industry is the poster child of data science. Google and Facebook became industry-dominating behemoths to a large extent because they excelled at crunching the numbers and showing the best online ads to their primary assets, user audiences, while Amazon did the same for item recommendations in its online store. 
In academia, the Netflix Prize Competition [2] devoted to movie recommendations was one of the first open competitions with serious prizes and organization, a pioneer that would eventually lead to Kaggle and innumerable open leaderboards that nowadays track the state of the art in virtually every measurable ML task. The Netflix Prize itself has led to significant breakthroughs in collaborative filtering, and its dataset is still used as one of the standard benchmarks [2]. One definitely cannot say that the field of recommender systems, in particular online advertising, lacks the attention of machine learning researchers, and many important advances keep being made every month [6, 11, 18, 29, 33, 36-38]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 274, + 295, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 274, + 295, + 427 + ], + "spans": [ + { + "bbox": [ + 50, + 274, + 295, + 427 + ], + "type": "text", + "content": "However, most advances are being made on the side of the platforms (ad marketplaces) such as Facebook (Meta) [19], Google [23], Alibaba [28], or Taobao [17], and therefore they are not accessible to the advertising platform users, i.e., digital marketers. Collaborative filtering datasets are understandably private, and marketing professionals that create advertising content do not have access to the data needed to predict their own future performance. Note that these predictions are often self-fulfilling: if, e.g., Meta models predict low click-through ratio (CTR) for your ad, Meta will charge you more for showing it, probably show it less, and the campaign will likely be a failure regardless of how accurate the CTR prediction has been [1]. Often, there is no practical way to control the cost of advertising; technically, if a platform decided to charge more money for an ad nothing could prevent them from doing so." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 427, + 295, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 427, + 295, + 536 + ], + "spans": [ + { + "bbox": [ + 50, + 427, + 295, + 536 + ], + "type": "text", + "content": "Moreover, even if marketing professionals could run the corresponding models, that would only be of modest help with their job, which is content creation. Suppose that a model tells you that your new ad is a bad match for your audience, and the expected CTR is low. How do you fix that? It cannot be a pure collaborative filtering model since it has to predict CTR for a new ad that has not been shown to users yet, but it is still an opaque model that maps your ad content into a latent representation via \"giant inscrutable matrices\". So all you can do even if you have such a model is to try and make a different ad, get a new prediction, and work via trial and error." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 537, + 295, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 537, + 295, + 624 + ], + "spans": [ + { + "bbox": [ + 50, + 537, + 295, + 624 + ], + "type": "text", + "content": "One potential way to address this issue involves visualizing the decision-making process of a neural network, providing marketers with insights into the rationale behind specific predictions made by AI models [3, 15, 20, 39]. Therefore, our first contribution in this work is a new variation of a state-of-the-art CTR prediction model coupled with a mechanism for analyzing the ad images (banners) via an image attention mechanism. The results provide human-understandable analysis that can be turned into actionable insights." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 624, + 295, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 624, + 295, + 701 + ], + "spans": [ + { + "bbox": [ + 50, + 624, + 295, + 701 + ], + "type": "text", + "content": "However, this is only the beginning. Individual ad analysis via explainable ML models has proven beneficial in scenarios such as individual content evaluation prior to starting an advertising campaign, but it is much less practical when applied to large volumes of images and text ads in real-world settings. The time constraints faced by marketers impede their ability to effectively process and extract key content traits in their own advertising practices." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 85, + 559, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 85, + 559, + 259 + ], + "spans": [ + { + "bbox": [ + 313, + 85, + 559, + 259 + ], + "type": "text", + "content": "In our opinion, the long-awaited revolution in digital advertising and content marketing will occur when both the ads themselves and the results of opaque models can be explained in ways that are both understandable for humans and actionable in terms of business results. We believe that the time for this revolution is now, and in this work we show that large language models such as GPT-3.5 [27] and GPT-4 [26] are already increasingly able to explain the \"reasoning\" behind recommender models and provide aggregate insights about advertising campaigns consisting of hundreds of individual ads. Prior to LLMs, approaches to aggregate text corpora in the context of recommender systems had been proposed via topic modeling [22, 25], sometimes coupled with deep learning [35] and user profiling [5, 34], but topic modeling is based on the bag-of-words assumption and cannot summarize text as an LLM does; visual understanding of ads had also been explored with convolutional networks [32]." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 260, + 559, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 260, + 559, + 358 + ], + "spans": [ + { + "bbox": [ + 313, + 260, + 559, + 358 + ], + "type": "text", + "content": "Therefore, our main contribution is that we present preliminary results for a road-map that could achieve this holy grail of content marketing: provide explainable, actionable insights into advertising content along with possible strategies for improvement with models that could work on the side of a small advertising agency rather than a huge platform. We begin with direct CTR prediction and then proceed to provide explainable insights and content recommendations with large language models and even visual generative AI (see Fig. 1)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 359, + 559, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 359, + 559, + 425 + ], + "spans": [ + { + "bbox": [ + 313, + 359, + 559, + 425 + ], + "type": "text", + "content": "The paper is organized as follows: in Section 2, we present an improved model for CTR prediction and visualization procedures for advertising banners, Section 3 introduces our approach to explainable ad analysis with large language models, Section 4 shows the results of a case study that confirms the effectiveness of our approach, and Section 5 concludes the paper." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 441, + 541, + 464 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 441, + 541, + 464 + ], + "spans": [ + { + "bbox": [ + 314, + 441, + 541, + 464 + ], + "type": "text", + "content": "2 EXPLAINING OPAQUE AI WITH AI: CTR PREDICTION AND VISUALIZATIONS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 468, + 559, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 468, + 559, + 589 + ], + "spans": [ + { + "bbox": [ + 313, + 468, + 559, + 589 + ], + "type": "text", + "content": "The lack of transparency within the advertising sector has been widely acknowledged as a primary reason for the inefficient allocation of advertising budgets. Notably, the responsibility for determining the cost per 1,000 impressions (CPM) and selecting competing entities in a programmatic auction rests primarily with the platform (we will use Meta as the running example). This decision-making process is in fact a result of numerous intricately interwoven machine learning (ML) models designed to dynamically match content with precise targeting criteria and individualized user profiles on Meta. These models are instrumental in estimating the likelihood of a user engaging in specific actions within the Meta ecosystem." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 589, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 589, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 589, + 559, + 710 + ], + "type": "text", + "content": "As an illustration, consider a hypothetical Meta user named Simon who is anticipated to click on an ad (perform the \"Click\" action) with the slogan \"Up your game nights with an ultra-immersive setup\" displayed on a Meta Ad banner showcasing Singtel, a mobile operator company, and their home internet broadband product (Fig. 2). 
This prediction is done by Meta's internal ML models, and quite often contradicts Meta's widely publicized \"best practices\" blueprints [24]. Here, it is crucial to acknowledge the additional information that advertising engines such as Meta take into account. They are free to use factors such as Simon's past visits to telecom websites, pictures showing computer games in Simon's account" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 228, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 228, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 228, + 68 + ], + "type": "text", + "content": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 325, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 325, + 60, + 559, + 69 + ], + "type": "text", + "content": "Qi Yang, Marlo Ongpin, Sergey Nikolenko, Alfred Huang, and Aleksandr Farseev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 75, + 82, + 231, + 236 + ], + "blocks": [ + { + "bbox": [ + 75, + 82, + 231, + 236 + ], + "lines": [ + { + "bbox": [ + 75, + 82, + 231, + 236 + ], + "spans": [ + { + "bbox": [ + 75, + 82, + 231, + 236 + ], + "type": "image", + "image_path": "c4b3bb86fba7bd6e20ae39746b404a55331d25eab19f268ee82422c44c38dc6a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 56, + 239, + 250, + 251 + ], + "lines": [ + { + "bbox": [ + 56, + 239, + 250, + 251 + ], + "spans": [ + { + "bbox": [ + 56, + 239, + 250, + 251 + ], + "type": "text", + "content": "Figure 2: A sample advertising banner on Meta." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 258, + 295, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 258, + 295, + 400 + ], + "spans": [ + { + "bbox": [ + 50, + 258, + 295, + 400 + ], + "type": "text", + "content": "on Meta, and much more. Moreover, these factors include Meta's own revenue considerations, prediction of the ad's \"relevance\" by Meta itself, timing of displaying this ad during the day, recency of the ad account (to incentivize new advertisers with improved performance), and the internal \"ranking\" of advertisers based on their history of disapproved ads, a process overseen by Meta. Regrettably, these predictive estimations are further influenced by the accuracy of Meta's ML models that profile Simon's content. For instance, when Simon is observed putting a diaper on his child, Meta's object recognition system might mistakenly associate it with an \"Inflatable Boat / Fishing\" interest; this is a real-life incident on the Meta platform, and such mistakes compound into suboptimal ad-related predictions down the line." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 401, + 295, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 401, + 295, + 478 + ], + "spans": [ + { + "bbox": [ + 50, + 401, + 295, + 478 + ], + "type": "text", + "content": "Confronted with numerous intricate technical hurdles, digital marketers, who frequently lack technical expertise, often resort to intuitive judgment or a trial-and-error methodology in formulating and examining their creative assets within digital advertising platforms. Thus, it becomes especially important to have comprehensive data-driven guidance, not only for optimizing outcomes but also for developing cost-effective practices." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 479, + 295, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 479, + 295, + 609 + ], + "spans": [ + { + "bbox": [ + 50, + 479, + 295, + 609 + ], + "type": "text", + "content": "One classical approach to providing this kind of guidance is to train an ML framework to predict the prospective performance of an advertising banner before allocating actual advertising budgets. In this section, we focus on the prediction of the click-through rate (CTR) metric, known to be closely associated with ad performance, particularly in the context of awareness and traffic advertising objectives. We used the recently presented SoWide model [20] as a sample state-of-the-art CTR prediction approach; its architecture is shown in Fig. 3. We updated the architecture slightly by replacing the ABN model for image processing with a Vision Transformer (ViT) [10], resulting in performance improvements, so we call it SoWide-v2." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 609, + 295, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 609, + 295, + 708 + ], + "spans": [ + { + "bbox": [ + 50, + 609, + 295, + 708 + ], + "type": "text", + "content": "Unlike conventional supervised learning, where a data point " + }, + { + "bbox": [ + 50, + 609, + 295, + 708 + ], + "type": "inline_equation", + "content": "(\\mathbf{x},y)" + }, + { + "bbox": [ + 50, + 609, + 295, + 708 + ], + "type": "text", + "content": " consists of both feature vector " + }, + { + "bbox": [ + 50, + 609, + 295, + 708 + ], + "type": "inline_equation", + "content": "\\mathbf{x}" + }, + { + "bbox": [ + 50, + 609, + 295, + 708 + ], + "type": "text", + "content": " and target variable " + }, + { + "bbox": [ + 50, + 609, + 295, + 708 + ], + "type": "inline_equation", + "content": "y" + }, + { + "bbox": [ + 50, + 609, + 295, + 708 + ], + "type": "text", + "content": ", the SoWide-v2 approach incorporates data from the campaign, ad set, and potentially multiple creatives to construct the features for each ad. Data points in the model leverage text and images from all creatives together with their respective estimated performances; in case of videos, we extract keyframes to obtain multiple distinct images included as additional training data. 
Furthermore, we extract low-level features from tabular, textual, and visual content, resulting" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 276, + 82, + 545, + 236 + ], + "blocks": [ + { + "bbox": [ + 276, + 82, + 545, + 236 + ], + "lines": [ + { + "bbox": [ + 276, + 82, + 545, + 236 + ], + "spans": [ + { + "bbox": [ + 276, + 82, + 545, + 236 + ], + "type": "image", + "image_path": "112cfbfecb1cc833ca3409d9807c516bac7c5d357b277dc9d792da4090557401.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 343, + 238, + 477, + 249 + ], + "lines": [ + { + "bbox": [ + 343, + 238, + 477, + 249 + ], + "spans": [ + { + "bbox": [ + 343, + 238, + 477, + 249 + ], + "type": "text", + "content": "Figure 3: SoWide-v2 architecture" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 258, + 559, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 258, + 559, + 357 + ], + "spans": [ + { + "bbox": [ + 313, + 258, + 559, + 357 + ], + "type": "text", + "content": "in a comprehensive dataset that can be used to train a model capable of predicting content performance based on information from multiple modalities. After preprocessing, extracted features serve as inputs for the click-through rate (CTR) prediction model. SoWidev2 makes the assumption that the performance of advertisements converges to an underlying global distribution [8, 13, 30], so we normalize CTR values into categorical representations. Predicted scores indicate whether the content can be classified as \"below average\", \"average\", or \"above average\" in terms of quality." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 357, + 559, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 357, + 559, + 566 + ], + "spans": [ + { + "bbox": [ + 313, + 357, + 559, + 566 + ], + "type": "text", + "content": "In essence, SoWide-v2 is a neural network based on the \"wide and deep models\" approach well known in recommender systems [7]. To facilitate representation learning for multimodal content, SoWide-v2 employs separate embedding layers and fully connected layers for each set of features. This process allows it to project sparse, high-dimensional, and low-level features into higher-level representations. To handle each modality appropriately, SoWide-v2 employs distinct deep models for feature processing. Specifically, it uses the TabTransformer [21] for tabular features and multilingual BERT [9] for textual content; the original SoWide used the attention branch network [16] for images but for SoWide-v2 we replaced it with a Vision Transformer [10]. Additionally, a fully connected layer is utilized to project the sparse high-dimensional features into a denser low-dimensional representation. These representations are subsequently concatenated and fed into another fully connected layer, followed by a softmax function for CTR classification, facilitating end-to-end joint learning. The model is trained using stochastic gradient descent (SGD) for 100 epochs, and hyper-parameter optimization is performed with the tree-structured Parzen estimator [4]." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 566, + 560, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 566, + 560, + 708 + ], + "spans": [ + { + "bbox": [ + 313, + 566, + 560, + 708 + ], + "type": "text", + "content": "For evaluation results, we use the same datasets and baselines as the original SoWide paper [20], comparing the performance of SoWide-v2 against the original SoWide and several conventional machine learning baselines (there appears to be no previous work on CTR prediction before [20] that could be used for a direct comparison) using the F1-score, a widely used classification metric. Evaluation is done in two different settings: for general ad campaigns and also specifically for campaigns targeting the \"Conversion\" objective, which represents the two most prevalent and significant ad campaign objectives. The results shown in Table 1 demonstrate that the SoWide-v2 model presents an improvement over the original SoWide, and both models significantly outperform all classical ML baselines. 
Notably, the F1-score for the general ad campaigns reaches 0.78," + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 320, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 320, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 320, + 69 + ], + "type": "text", + "content": "Against Opacity: Explainable AI and Large Language Models for Effective Digital Advertising" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 382, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 382, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 382, + 60, + 558, + 68 + ], + "type": "text", + "content": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 58, + 125, + 155, + 308 + ], + "blocks": [ + { + "bbox": [ + 58, + 83, + 154, + 114 + ], + "lines": [ + { + "bbox": [ + 58, + 83, + 154, + 114 + ], + "spans": [ + { + "bbox": [ + 58, + 83, + 154, + 114 + ], + "type": "text", + "content": "Table 1: F1-score evaluation for CTR prediction models." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 125, + 155, + 308 + ], + "lines": [ + { + "bbox": [ + 58, + 125, + 155, + 308 + ], + "spans": [ + { + "bbox": [ + 58, + 125, + 155, + 308 + ], + "type": "table", + "html": "
AllCon- vers- sion
k-nearest neighbors0.3380.254
Random forest0.3020.293
Gradient boosting0.3490.262
AdaBoost0.2890.277
Multilayer perceptron0.6540.642
SoWide0.7020.660
SoWide-v20.7800.671
", + "image_path": "d81a1b7e2888f389fc9dce32e0582f52aaa9238833b2c0fb8e377e88c7f887e1.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 175, + 85, + 249, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 85, + 249, + 95 + ], + "spans": [ + { + "bbox": [ + 175, + 85, + 249, + 95 + ], + "type": "text", + "content": "Content Analysis" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 175, + 99, + 255, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 99, + 255, + 106 + ], + "spans": [ + { + "bbox": [ + 175, + 99, + 255, + 106 + ], + "type": "text", + "content": "Average Predicted Score Low" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 175, + 110, + 212, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 110, + 212, + 117 + ], + "spans": [ + { + "bbox": [ + 175, + 110, + 212, + 117 + ], + "type": "inline_equation", + "content": "\\mathbb{N}^2" + }, + { + "bbox": [ + 175, + 110, + 212, + 117 + ], + "type": "text", + "content": " of Ads: 16" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 175, + 121, + 225, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 175, + 121, + 225, + 129 + ], + "spans": [ + { + "bbox": [ + 175, + 121, + 225, + 129 + ], + "type": "inline_equation", + "content": "\\mathsf{N}^{\\mathsf{g}}" + }, + { + "bbox": [ + 175, + 121, + 225, + 129 + ], + "type": "text", + "content": " of Creatives: 25" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 166, + 136, + 260, + 277 + ], + "blocks": [ + { + "bbox": [ + 166, + 136, + 260, + 277 + ], + "lines": [ + { + "bbox": [ + 166, + 136, + 260, + 277 + ], + "spans": [ + { + "bbox": [ + 166, + 136, + 260, + 277 + ], + "type": "image", + "image_path": "acf1d5dc66621283575e40bafc9c60cf3bb0955f77888301abc3921b2143422b.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { 
+ "bbox": [ + 162, + 285, + 559, + 308 + ], + "lines": [ + { + "bbox": [ + 162, + 285, + 559, + 308 + ], + "spans": [ + { + "bbox": [ + 162, + 285, + 559, + 308 + ], + "type": "text", + "content": "Figure 4: Sample predicted low-CTR and high-CTR advertising banners and heatmap visualizations of the attention layers involved in the prediction." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 265, + 137, + 359, + 277 + ], + "blocks": [ + { + "bbox": [ + 265, + 137, + 359, + 277 + ], + "lines": [ + { + "bbox": [ + 265, + 137, + 359, + 277 + ], + "spans": [ + { + "bbox": [ + 265, + 137, + 359, + 277 + ], + "type": "image", + "image_path": "4ffae6c66addebde4791fb51721097dd80f7180f5efcc93a3f6e017e285a8ce0.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 361, + 137, + 458, + 277 + ], + "blocks": [ + { + "bbox": [ + 361, + 137, + 458, + 277 + ], + "lines": [ + { + "bbox": [ + 361, + 137, + 458, + 277 + ], + "spans": [ + { + "bbox": [ + 361, + 137, + 458, + 277 + ], + "type": "image", + "image_path": "4bd9708227b626fdfcf010ad44a602d119731127edb8f5742a99ca7b77df091b.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 460, + 137, + 559, + 277 + ], + "blocks": [ + { + "bbox": [ + 460, + 137, + 559, + 277 + ], + "lines": [ + { + "bbox": [ + 460, + 137, + 559, + 277 + ], + "spans": [ + { + "bbox": [ + 460, + 137, + 559, + 277 + ], + "type": "image", + "image_path": "1eaf6b5d2722b766c5e99af3c555f3538205c698b313ac8e7426daef3cae5cd9.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 318, + 295, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 318, + 295, + 372 + ], + "spans": [ + { + "bbox": [ + 50, + 318, + 295, + 372 
+ ], + "type": "text", + "content": "which confirms that the SoWide-v2 approach effectively accommodates the hierarchical structure inherent in advertising data, enabling effective multimodal learning for the prediction of ad performance. The results validate that SoWide-v2 is a state-of-the-art CTR prediction model." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 50, + 373, + 295, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 373, + 295, + 483 + ], + "spans": [ + { + "bbox": [ + 50, + 373, + 295, + 483 + ], + "type": "text", + "content": "Thus far, we have introduced a framework that enables advertisers to assess the potential performance of their own content, and potentially that of their competitors, prior to its launch. This represents a valuable tactical capability that had been unavailable to the community for a long time. However, once a creative marketer gains access to the initial prediction results for a specific content piece, another significant challenge lies in comprehending the underlying factors that contribute to its success or failure. What went wrong, what was done right, and how do we amplify the right parts while suppressing the wrong parts?" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 50, + 483, + 295, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 483, + 295, + 592 + ], + "spans": [ + { + "bbox": [ + 50, + 483, + 295, + 592 + ], + "type": "text", + "content": "One approach to address this question would be to utilize various visualization techniques, specifically those that illustrate the decision-making process of the neural network while making a specific prediction. If the prediction is accurate, such visualizations are believed to provide insights into the underlying reasons behind the performance of a creative asset. 
Consequently, these visualizations can serve as a valuable resource for marketers in making informed decisions regarding the inclusion of specific components in future creative assets, enabling them to effectively communicate their requirements to the creative team." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 50, + 592, + 295, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 592, + 295, + 702 + ], + "spans": [ + { + "bbox": [ + 50, + 592, + 295, + 702 + ], + "type": "text", + "content": "Figure 4 shows an illustrative example of such visualizations. The attention layers of the neural network used for CTR prediction are visualized as interactive heatmaps, revealing the specific regions of the banner that significantly influence the model's predictions. The figure shows that such attention visualization highlights the key elements within a Singtel banner (on the left) that contribute to its high predicted performance, namely gaming-related objects such as the monitor and the game controller. These elements effectively convey the message that a superior internet connection is essential for enhancing the gaming experience. Similarly, for the Circles.Life" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 318, + 559, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 318, + 559, + 373 + ], + "spans": [ + { + "bbox": [ + 313, + 318, + 559, + 373 + ], + "type": "text", + "content": "banner (on the right), the areas featuring the lady in the background were found to negatively impact its performance. This suggests that the composition and balance of the banner's visual elements, particularly in relation to the overall content creation practices, may have influenced its predicted low CTR values." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 314, + 384, + 533, + 422 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 384, + 533, + 422 + ], + "spans": [ + { + "bbox": [ + 314, + 384, + 533, + 422 + ], + "type": "text", + "content": "3 EXPLAINING HUMANS FOR HUMANS: SODA, A LLM-BASED ADVERTISING ANALYSIS FRAMEWORK" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 425, + 559, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 425, + 559, + 555 + ], + "spans": [ + { + "bbox": [ + 313, + 425, + 559, + 555 + ], + "type": "text", + "content": "In the last section, we presented a system capable of effectively capturing and visualizing the factors influencing the predicted performance of ads in terms of potential CTR. However, in domains such as performance marketing decisions for choosing specific creatives for campaigns often need to be made under tight deadlines, sometimes literally in a few hours or even minutes. Moreover, these industries are characterized by large volumes of creative assets and a multitude of promotions simultaneously conducted by competitors in an \"always-on\" manner. Therefore, one cannot run detailed analysis for every ad, and there is a dire need for further automated analytical tools that would enable human marketers to rapidly comprehend available data and information." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 556, + 559, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 556, + 559, + 666 + ], + "spans": [ + { + "bbox": [ + 313, + 556, + 559, + 666 + ], + "type": "text", + "content": "In order to address this challenge, we present an extension to our framework with a novel approach that leverages large language models (LLMs) to provide additional insights into the data and CTR predictions, called SODA. 
We outline an analytical pipeline that incorporates LLM-based explanations and generations and demonstrate its practical applications through a real-world scenario involving four Singapore telecommunication companies. This part of our framework aims to enhance the interpretability and comprehension of the data, facilitating better-informed decision-making in these fast-paced and competitive industries." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 666, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 666, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 666, + 559, + 710 + ], + "type": "text", + "content": "The general pipeline of our analysis is shown in Figure 5. First, we use an LLM to extract specific well-defined insights from input ads, such as the needs served by this ad, products being advertised, and more (see below); the insights can be stored as features in" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 229, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 229, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 229, + 68 + ], + "type": "text", + "content": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 325, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 325, + 60, + 559, + 69 + ], + "type": "text", + "content": "Qi Yang, Marlo Ongpin, Sergey Nikolenko, Alfred Huang, and Aleksandr Farseev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 60, + 82, + 202, + 430 + ], + "blocks": [ + { + "bbox": [ + 60, + 82, + 202, + 430 + ], + "lines": [ + { + "bbox": [ + 60, + 82, + 202, + 430 + ], + "spans": [ + { + "bbox": [ + 60, + 82, + 202, + 430 + ], + 
"type": "image", + "image_path": "40419723bf25fd8c75bdcd9f0b89e0e34c20da748523b3877ae518fe1cc38457.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 61, + 437, + 201, + 460 + ], + "lines": [ + { + "bbox": [ + 61, + 437, + 201, + 460 + ], + "spans": [ + { + "bbox": [ + 61, + 437, + 201, + 460 + ], + "type": "text", + "content": "Figure 5: General pipeline of our LLM-based analysis" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 216, + 82, + 564, + 192 + ], + "blocks": [ + { + "bbox": [ + 216, + 82, + 564, + 192 + ], + "lines": [ + { + "bbox": [ + 216, + 82, + 564, + 192 + ], + "spans": [ + { + "bbox": [ + 216, + 82, + 564, + 192 + ], + "type": "image", + "image_path": "f8147ac1545644272b718d70da3125c80dd591c88d270d8ca53e061febc8d9b0.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 331, + 196, + 449, + 208 + ], + "lines": [ + { + "bbox": [ + 331, + 196, + 449, + 208 + ], + "spans": [ + { + "bbox": [ + 331, + 196, + 449, + 208 + ], + "type": "text", + "content": "Figure 6: Sample ad analysis" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 219, + 213, + 561, + 342 + ], + "blocks": [ + { + "bbox": [ + 219, + 213, + 561, + 342 + ], + "lines": [ + { + "bbox": [ + 219, + 213, + 561, + 342 + ], + "spans": [ + { + "bbox": [ + 219, + 213, + 561, + 342 + ], + "type": "image", + "image_path": "78bab7b34c60a5323f9d775cdbd3ac401be8ac157a094161dd56c211689b0508.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 292, + 343, + 488, + 354 + ], + "lines": [ + { + "bbox": [ + 292, + 343, + 488, + 354 + ], + "spans": [ + { + "bbox": [ + 292, + 343, + 488, + 354 + ], + "type": "text", + "content": "Figure 7: Sample brand persona analysis results" + } + ] + } + ], + "index": 7, + 
"angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 219, + 359, + 561, + 448 + ], + "blocks": [ + { + "bbox": [ + 219, + 359, + 561, + 448 + ], + "lines": [ + { + "bbox": [ + 219, + 359, + 561, + 448 + ], + "spans": [ + { + "bbox": [ + 219, + 359, + 561, + 448 + ], + "type": "image", + "image_path": "c3822a688e66daeb84d3b44532b2a359cfc5d46323c77f761cbffddcf11b66aa.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 52, + 464, + 559, + 599 + ], + "blocks": [ + { + "bbox": [ + 283, + 450, + 497, + 461 + ], + "lines": [ + { + "bbox": [ + 283, + 450, + 497, + 461 + ], + "spans": [ + { + "bbox": [ + 283, + 450, + 497, + 461 + ], + "type": "text", + "content": "Figure 8: Sample brand comparative analysis results" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 52, + 464, + 559, + 599 + ], + "lines": [ + { + "bbox": [ + 52, + 464, + 559, + 599 + ], + "spans": [ + { + "bbox": [ + 52, + 464, + 559, + 599 + ], + "type": "image", + "image_path": "b3e7aeef26d19972d49e9be7539552e1ba549f10a543d21e3e3e05c7533dc5e6.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 203, + 602, + 406, + 614 + ], + "lines": [ + { + "bbox": [ + 203, + 602, + 406, + 614 + ], + "spans": [ + { + "bbox": [ + 203, + 602, + 406, + 614 + ], + "type": "text", + "content": "Figure 9: Sample user persona generation results." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 621, + 296, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 621, + 296, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 621, + 296, + 710 + ], + "type": "text", + "content": "tabular form. 
Then, we use these features together with further engineered prompts to perform generalizing analysis of a brand's target audiences, personas, needs, and insights expressed by the ads, tone, and topical categories of the current campaign and others. The resulting coverage of the campaign closely reflects campaign analysis commonly performed by marketing professionals and can be further used to tune the brand's message, tone, target audiences, personas, and more. The pipeline is also able to present" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 621, + 560, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 621, + 560, + 654 + ], + "spans": [ + { + "bbox": [ + 313, + 621, + 560, + 654 + ], + "type": "text", + "content": "specific examples helpful for marketing professionals, such as sample (imagined) user profiles or user personas, which are also one of the common marketing tools. Let us dive into some details." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 654, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 654, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 654, + 559, + 710 + ], + "type": "text", + "content": "Figure 6 shows sample results of our initial experiments on ad analysis. We selected batches of ads from the Facebook Ad Library for the same brand and processed them with an LLM, customized only with natural language prompt engineering. 
As a result, the LLM has been able to successfully identify key features of each" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 321, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 321, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 321, + 69 + ], + "type": "text", + "content": "Against Opacity: Explainable AI and Large Language Models for Effective Digital Advertising" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 383, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 383, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 383, + 60, + 558, + 68 + ], + "type": "text", + "content": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 84, + 294, + 183 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 294, + 183 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 294, + 183 + ], + "type": "text", + "content": "advertisement, including excellent responses to such seemingly \"human\" questions as identifying the human need, human insight, and the main archetypes used in an ad. Moreover, answers to most questions are standardized (as the LLM was instructed) and can be subject to automated processing. This kind of analysis has always been a key part of online marketing, and to the best of our knowledge, it has never been successfully automated and scaled up before. Such tasks had always required human labeling and thus had been restricted to a few sample ads rather than the entire dataset." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 183, + 294, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 183, + 294, + 281 + ], + "spans": [ + { + "bbox": [ + 50, + 183, + 294, + 281 + ], + "type": "text", + "content": "As the next step, we use the ads and extracted features as inputs for a number of prompts asking to summarize information in a variety of formats commonly used in content marketing. We have seen successful summarization across the board, with important insights identified by the LLM and presented in an accessible and actionable format. Fig. 7 shows a sample result of our brand persona analysis, complete with main brand values used in the ad campaigns, the goals of using them, and detailed analysis of the primary \"caregiver\" persona, including supporting examples from the data." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 282, + 294, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 282, + 294, + 337 + ], + "spans": [ + { + "bbox": [ + 50, + 282, + 294, + 337 + ], + "type": "text", + "content": "Figure 8 shows the results of a comparative analysis of four advertising campaigns run over the same time period by different brands. Again, the LLM has correctly identified its key distinguishing factors, and the list of differences is very similar to one that could be produced by a human marketing professional." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 337, + 295, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 337, + 295, + 501 + ], + "spans": [ + { + "bbox": [ + 50, + 337, + 295, + 501 + ], + "type": "text", + "content": "Another avenue for using state-of-the-art generative AI capabilities that we have explored is user persona generation, an important tool in content marketing that has long proven to be useful for creative work[12, 14, 40]. 
To produce user personas, we begin with a list of interests (either extracted as shown in Fig. 9 or obtained from the client and/or social media platform) and prompt the LLM to give examples of user descriptions that could fit such interests. Fig. 7 shows a sample resulting user persona, which is fully believable to the professionals. To make the result even more tangible, we supplement such user personas with images generated by a state-of-the-art text-image model, in this case, Stable Diffusion [31]. To make the entire pipeline self-contained we ask the original LLM to also generate the prompt for the text-image model from the user persona description and a few examples of good prompts. The results also illustrated in Fig. 7, are very promising." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 501, + 295, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 501, + 295, + 578 + ], + "spans": [ + { + "bbox": [ + 50, + 501, + 295, + 578 + ], + "type": "text", + "content": "The LLM used in all experiments was ChatGPT based on GPT-3.5 [27], and we believe that simply switching to more powerful LLMs such as GPT-4 [26] may lead to further increased performance across all applications. Note also that while GPT-3.5 can only process text ads, GPT-4 is already able to analyze images jointly with text (this ability has not yet been made public at the time of writing), which is arguably even more important for content marketing." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 596, + 137, + 607 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 596, + 137, + 607 + ], + "spans": [ + { + "bbox": [ + 51, + 596, + 137, + 607 + ], + "type": "text", + "content": "4 CASE STUDY" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 610, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 610, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 610, + 295, + 710 + ], + "type": "text", + "content": "To evaluate the practical value and viability of the proposed framework expansion using large language models (LLMs) for generating rapid insights and enabling prompt marketing-related decision-making, we have engaged 12 marketing professionals currently employed at marketing departments of Business-to-Consumer (B2C) brands or advertising and marketing agencies across Singapore, China, and the UK. These professionals were selected based on their extensive experience, averaging 9 years, in managing digital marketing campaigns across various industries." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 84, + 559, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 84, + 559, + 150 + ], + "spans": [ + { + "bbox": [ + 313, + 84, + 559, + 150 + ], + "type": "text", + "content": "The professionals were presented with the details and results of our preliminary experiments analyzing and comparing the marketing campaigns of four major telecommunication companies in Singapore, as described in Section 3. They were then asked to provide their perspectives on the usefulness, quality, and potential impact of the insights and outputs generated by our framework." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 150, + 559, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 150, + 559, + 227 + ], + "spans": [ + { + "bbox": [ + 313, + 150, + 559, + 227 + ], + "type": "text", + "content": "All 12 professionals responded very positively about the value of our approach. They found high-level overviews of brand positioning and audience targeting strategies, enriched with specific examples, to be highly useful to gain quick familiarity with brand messaging and inspire new creative directions. The generated user personas and accompanying AI-generated images were praised for bringing additional richness and tangibility to the insights." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 227, + 558, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 227, + 558, + 303 + ], + "spans": [ + { + "bbox": [ + 313, + 227, + 558, + 303 + ], + "type": "text", + "content": "Several professionals commented that the coherent, standardized format of the outputs would allow for efficient processing and decision-making, especially given the tight timeframes frequently faced in the industry. More senior professionals have expressed that they foresee solutions like ours significantly augmenting and accelerating essential marketing functions through the automation of repetitive, labor-intensive tasks." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 304, + 559, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 304, + 559, + 425 + ], + "spans": [ + { + "bbox": [ + 313, + 304, + 559, + 425 + ], + "type": "text", + "content": "This highly encouraging feedback from advertising and marketing professionals suggests strong potential business value in developing and applying AI-powered solutions, such as the proposed extension of our framework, for the automation of marketing campaign analysis and strategic planning. 
While adoption may face initial resistance, especially from very senior professionals, many in the industry seem poised to welcome AI augmenting and enhancing their work. Our approach, focused on mimicking established human processes and outputs, appears well-suited to addressing common pain points and unlocking new efficiencies, especially in such fast-paced domains as performance marketing." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 315, + 449, + 406, + 460 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 449, + 406, + 460 + ], + "spans": [ + { + "bbox": [ + 315, + 449, + 406, + 460 + ], + "type": "text", + "content": "5 CONCLUSION" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 464, + 559, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 464, + 559, + 639 + ], + "spans": [ + { + "bbox": [ + 313, + 464, + 559, + 639 + ], + "type": "text", + "content": "In this work, we have presented a novel advertising analysis framework, called SODA, which amalgamates large language models, explainable artificial intelligence, and attention map visualization techniques, heralding a potential future of human-AI collaboration within the realm of digital advertising. Through the integration of LLMs and the incorporation of explainability aspects, our novel approach envisions enhanced efficiency and synergy between marketers and AI systems, hopefully leading to a new era of intelligent decision-making. We believe that our approach holds the promise of empowering a new generation of marketers to leverage advanced AI technologies effectively, fostering a deeper understanding of the underlying mechanisms driving ad performance and facilitating informed decision-making processes. Note that while we already show promising results, these are mostly preliminary experiments, and we strongly believe that this direction of research will bring many new advances in the nearest future." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 673, + 454, + 683 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 673, + 454, + 683 + ], + "spans": [ + { + "bbox": [ + 314, + 673, + 454, + 683 + ], + "type": "text", + "content": "6 ACKNOWLEDGEMENT" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 687, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 687, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 687, + 559, + 710 + ], + "type": "text", + "content": "This work was funded by the Russian Science Foundation grant No 22-11-00135 https://rscf.ru/en/project/22-11-00135/" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 228, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 228, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 228, + 68 + ], + "type": "text", + "content": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 325, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 325, + 60, + 558, + 69 + ], + "type": "text", + "content": "Qi Yang, Marlo Ongpin, Sergey Nikolenko, Alfred Huang, and Aleksandr Farseev" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 83, + 124, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 83, + 124, + 95 + ], + "spans": [ + { + "bbox": [ + 52, + 83, + 124, + 95 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 98, + 295, + 704 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 56, + 98, + 294, + 113 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 98, + 294, + 
113 + ], + "spans": [ + { + "bbox": [ + 56, + 98, + 294, + 113 + ], + "type": "text", + "content": "[1] [n.d.]. Best practices to potentially reduce cost per result for Meta ads. https://www.facebook.com/business/help/321695409726523." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 114, + 295, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 114, + 295, + 129 + ], + "spans": [ + { + "bbox": [ + 56, + 114, + 295, + 129 + ], + "type": "text", + "content": "[2] [n.d.]. Netflix Prize data. https://www.kaggle.com/datasets/netflix-inc/netflix-prize-data." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 129, + 294, + 160 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 129, + 294, + 160 + ], + "spans": [ + { + "bbox": [ + 56, + 129, + 294, + 160 + ], + "type": "text", + "content": "[3] Anton Alekseev, Elena Tutubalina, Sejeong Kwon, and Sergey Nikolenko. 2022. Near-Zero-Shot Suggestion Mining with a Little Help from WordNet. In Analysis of Images, Social Networks and Texts. Springer International Publishing, Cham, 23-36." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 161, + 294, + 185 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 161, + 294, + 185 + ], + "spans": [ + { + "bbox": [ + 56, + 161, + 294, + 185 + ], + "type": "text", + "content": "[4] J. Bergstra, R. Bardenet, Y. Bengio, and B. Kegl. 2011. Algorithms for HyperParameter Optimization. In Advances in Neural Information Processing Systems, Vol. 24. Curran Associates, Inc." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 186, + 294, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 186, + 294, + 201 + ], + "spans": [ + { + "bbox": [ + 56, + 186, + 294, + 201 + ], + "type": "text", + "content": "[5] K Buraya, A Farseev, and A Filchenkov. 2018. Multi-view personality profiling based on longitudinal data. 
Lecture Notes in Computer Science 11018 (2018), 15-27." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 201, + 294, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 201, + 294, + 224 + ], + "spans": [ + { + "bbox": [ + 56, + 201, + 294, + 224 + ], + "type": "text", + "content": "[6] Jiawei Chen, Hande Dong, Xiang Wang, Fuli Feng, Meng Wang, and Xiangnan He. 2023. Bias and Debias in Recommender System: A Survey and Future Directions. ACM Trans. Inf. Syst. 41, 3, Article 67 (feb 2023), 39 pages." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 225, + 294, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 225, + 294, + 264 + ], + "spans": [ + { + "bbox": [ + 56, + 225, + 294, + 264 + ], + "type": "text", + "content": "[7] H.-T. Cheng, L. Koc, J. Harmsen, T. Shaked, T. Chandra, H. Aradhye, G. Anderson, G. Corrado, W. Chai, M. Ispir, R. Anil, Z. Haque, L. Hong, V. Jain, X. Liu, and H. Shah. 2016. Wide & Deep Learning for Recommender Systems. In Proc. 1st Workshop on Deep Learning for Recommender Systems (Boston, MA, USA) (DLRS 2016). ACM, New York, NY, USA, 7-10." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 265, + 294, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 265, + 294, + 304 + ], + "spans": [ + { + "bbox": [ + 56, + 265, + 294, + 304 + ], + "type": "text", + "content": "[8] Alok Kumar Chowdhury, Aleksandr Farseev, Prithwi Raj Chakraborty, Dian Tjondronegoro, and Vinod Chandran. 2017. Automatic classification of physical exercises from wearable sensors using small dataset from non-laboratory settings. In 2017 IEEE Life Sciences Conference (LSC). 111-114. 
https://doi.org/10.1109/LSC.2017.8268156" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 304, + 294, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 304, + 294, + 328 + ], + "spans": [ + { + "bbox": [ + 56, + 304, + 294, + 328 + ], + "type": "text", + "content": "[9] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proc. 2019 NAACL. ACL, 4171-4186." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 329, + 294, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 329, + 294, + 360 + ], + "spans": [ + { + "bbox": [ + 53, + 329, + 294, + 360 + ], + "type": "text", + "content": "[10] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. 2020. An image is worth " + }, + { + "bbox": [ + 53, + 329, + 294, + 360 + ], + "type": "inline_equation", + "content": "16 \\times 16" + }, + { + "bbox": [ + 53, + 329, + 294, + 360 + ], + "type": "text", + "content": " words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929 (2020)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 361, + 294, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 361, + 294, + 400 + ], + "spans": [ + { + "bbox": [ + 53, + 361, + 294, + 400 + ], + "type": "text", + "content": "[11] Yali Du, Yinwei Wei, Wei Ji, Fan Liu, Xin Luo, and Liqiang Nie. 2023. Multi-Queue Momentum Contrast for Microvideo-Product Retrieval. In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining (Singapore, Singapore) (WSDM '23). ACM, New York, NY, USA, 1003–1011. 
https://doi.org/10.1145/3539597.3570405" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 401, + 294, + 439 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 401, + 294, + 439 + ], + "spans": [ + { + "bbox": [ + 53, + 401, + 294, + 439 + ], + "type": "text", + "content": "[12] Aleksandr Farseev. 2023. Under the Hood of Social Media Advertising: How Do We Use AI Responsibly for Advertising Targeting and Creative Evaluation. In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining (Singapore, Singapore) (WSDM '23). ACM, New York, NY, USA, 1281-1282. https://doi.org/10.1145/3539597.3575791" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 53, + 441, + 294, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 441, + 294, + 464 + ], + "spans": [ + { + "bbox": [ + 53, + 441, + 294, + 464 + ], + "type": "text", + "content": "[13] A Farseev, N Gukov, I Gossoudarev, and U Zarichnyak. 2014. Cross-platform online venue and user community recommendation based upon social networks data mining. Computer Instruments in Education 6 (2014), 28-38." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 53, + 464, + 294, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 464, + 294, + 503 + ], + "spans": [ + { + "bbox": [ + 53, + 464, + 294, + 503 + ], + "type": "text", + "content": "[14] Aleksandr Farseev, Kirill Lepikhin, Hendrik Schwartz, Eu Khoon Ang, and Kenny Powar. 2018. SoMin.AI: Social Multimedia Influencer Discovery Marketplace. In Proceedings of the 26th ACM International Conference on Multimedia (Seoul, Republic of Korea) (MM '18). ACM, New York, NY, USA, 1234-1236. 
https://doi.org/10.1145/3240508.3241387" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 53, + 504, + 294, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 504, + 294, + 544 + ], + "spans": [ + { + "bbox": [ + 53, + 504, + 294, + 544 + ], + "type": "text", + "content": "[15] Aleksandr Farseev, Qi Yang, Andrey Filchenkov, Kirill Lepikhin, Yu-Yi Chu-Farseeva, and Daron-Benjamin Loo. 2021. SoMin.Ai: Personality-Driven Content Generation Platform. In Proceedings of the 14th ACM International Conference on Web Search and Data Mining (WSDM '21). ACM, New York, NY, USA, 890-893. https://doi.org/10.1145/3437963.3441714" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 53, + 544, + 294, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 544, + 294, + 567 + ], + "spans": [ + { + "bbox": [ + 53, + 544, + 294, + 567 + ], + "type": "text", + "content": "[16] H. Fukui, T. Hirakawa, T. Yamashita, and H. Fujiyoshi. 2019. Attention Branch Network: Learning of Attention Mechanism for Visual Explanation. Computer Vision and Pattern Recognition (2019), 10705-10714." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 53, + 568, + 294, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 568, + 294, + 591 + ], + "spans": [ + { + "bbox": [ + 53, + 568, + 294, + 591 + ], + "type": "text", + "content": "[17] T. Ge, H. Liu, P. Yi, S. Huang, Z. Zhang, X. Zhu, Y. Zhang, K. Gai, L. Zhao, G. Zhou, K. Chen, S. Liu, H. Yi, Z. Hu, B. Liu, and P. Sun. 2018. Image Matters: Visually Modeling User Behaviors Using Advanced Model Server. 2087-2095." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 53, + 592, + 294, + 631 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 592, + 294, + 631 + ], + "spans": [ + { + "bbox": [ + 53, + 592, + 294, + 631 + ], + "type": "text", + "content": "[18] Xiangnan He, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu, and Tat-Seng Chua. 2017. Neural Collaborative Filtering. In Proceedings of the 26th International Conference on World Wide Web (Perth, Australia) (WWW'17). International World Wide Web Conferences Steering Committee, Republic and Canton of Geneva, CHE, 173-182. https://doi.org/10.1145/3038912.3052569" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 53, + 632, + 294, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 632, + 294, + 663 + ], + "spans": [ + { + "bbox": [ + 53, + 632, + 294, + 663 + ], + "type": "text", + "content": "[19] X. He, J. Pan, O. Jin, T. Xu, B. Liu, T. Xu, Y. Shi, A. Atallah, R. Herbrich, S. Bowers, and J. Q. Candela. 2014. Practical Lessons from Predicting Clicks on Ads at Facebook. In Proc. 8th International Workshop on Data Mining for Online Advertising (ADKDD'14). ACM, 1-9." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 53, + 664, + 294, + 704 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 664, + 294, + 704 + ], + "spans": [ + { + "bbox": [ + 53, + 664, + 294, + 704 + ], + "type": "text", + "content": "[20] Alfred Huang, Qi Yang, Sergey Nikolenko, Marlo Ongpin, Ilia Gossoudarev, Ngoc Yen Duong, Kirill Lepikhin, Sergey Vishnyakov, Yuyi Chu-Farseeva, and Aleksandr Farseev. 2023. SoCraft: Advertiser-Level Predictive Scoring for Creative Performance on Meta. In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining (Singapore, Singapore) (WSDM '23). 
ACM," + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 317, + 87, + 558, + 700 + ], + "type": "list", + "angle": 0, + "index": 45, + "blocks": [ + { + "bbox": [ + 331, + 87, + 543, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 87, + 543, + 95 + ], + "spans": [ + { + "bbox": [ + 331, + 87, + 543, + 95 + ], + "type": "text", + "content": "New York, NY, USA, 1132-1135. https://doi.org/10.1145/3539597.3573032" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 95, + 558, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 95, + 558, + 110 + ], + "spans": [ + { + "bbox": [ + 317, + 95, + 558, + 110 + ], + "type": "text", + "content": "[21] X. Huang, A. Khetan, M. Cvitkovic, and Z. Karnin. 2020. TabTransformer: Tabular Data Modeling Using Contextual Embeddings. (2020). arXiv:2012.06678 [cs.LG]" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 111, + 558, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 111, + 558, + 150 + ], + "spans": [ + { + "bbox": [ + 317, + 111, + 558, + 150 + ], + "type": "text", + "content": "[22] Sergei Koltcov, Olessia Koltsova, and Sergey Nikolenko. 2014. Latent Dirichlet Allocation: Stability and Applications to Studies of User-Generated Content. In Proceedings of the 2014 ACM Conference on Web Science (Bloomington, Indiana, USA) (WebSci '14). ACM, New York, NY, USA, 161–165. https://doi.org/10.1145/2615569.2615680" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 151, + 558, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 151, + 558, + 182 + ], + "spans": [ + { + "bbox": [ + 317, + 151, + 558, + 182 + ], + "type": "text", + "content": "[23] H. B. McMahan, G. Holt, D. Sculley, M. Young, D. Ebner, J. Grady, L. Nie, T. Phillips, E. Davydov, D. Golovin, S. Chikkerur, D. Liu, M. Wattenberg, A. M. Hrafinkelsson, T. Boulos, and J. 
Kubica. 2013. Ad Click Prediction: A View from the Trenches. In Proc. 19th ACM SIGKDD (KDD '13). ACM, 1222-1230." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 183, + 558, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 183, + 558, + 198 + ], + "spans": [ + { + "bbox": [ + 317, + 183, + 558, + 198 + ], + "type": "text", + "content": "[24] Meta. 2023. Meta Blueprint. https://www.facebookblueprint.com/student/catalog Accessed on June 06, 2023." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 198, + 558, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 198, + 558, + 230 + ], + "spans": [ + { + "bbox": [ + 317, + 198, + 558, + 230 + ], + "type": "text", + "content": "[25] Sergey Nikolenko. 2015. SVD-LDA: Topic Modeling for Full-Text Recommender Systems. In Advances in Artificial Intelligence and Its Applications, Odbulia Pichardo Lagunas, Oscar Herrera Alcantara, and Gustavo Arroyo Figueroa (Eds.). Springer International Publishing, Cham, 67-79." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 231, + 519, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 231, + 519, + 239 + ], + "spans": [ + { + "bbox": [ + 317, + 231, + 519, + 239 + ], + "type": "text", + "content": "[26] OpenAI. 2023. GPT-4 Technical Report. arXiv:2303.08774 [cs.CL]" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 317, + 239, + 558, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 239, + 558, + 277 + ], + "spans": [ + { + "bbox": [ + 317, + 239, + 558, + 277 + ], + "type": "text", + "content": "[27] Long Ouyang, Jeff Wu, Xu Jiang, Diogo Almeida, Carroll L. Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, John Schulman, Jacob Hilton, Fraser Kelton, Luke Miller, Maddie Simens, Amanda Askell, Peter Welinder, Paul Christiano, Jan Leike, and Ryan Lowe. 2022. 
Training language models to follow instructions with human feedback. arXiv:2203.02155 [cs.CL]" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 317, + 278, + 558, + 301 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 278, + 558, + 301 + ], + "spans": [ + { + "bbox": [ + 317, + 278, + 558, + 301 + ], + "type": "text", + "content": "[28] Wentao Ouyang, Xiwu Zhang, Shukui Ren, Chao Qi, Zhaojie Liu, and Yanlong Du. 2019. Representation Learning-Assisted Click-Through Rate Prediction. In Proc. 28th IJCAI, 4561-4567. https://doi.org/10.24963/ijcai.2019/634" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 317, + 302, + 558, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 302, + 558, + 317 + ], + "spans": [ + { + "bbox": [ + 317, + 302, + 558, + 317 + ], + "type": "text", + "content": "[29] Francesco Ricci, Lior Rokach, Bracha Shapira, and Paul B. Kantor. 2010. Recommender Systems Handbook (1st ed.). Springer-Verlag, Berlin, Heidelberg." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 317, + 318, + 558, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 318, + 558, + 341 + ], + "spans": [ + { + "bbox": [ + 317, + 318, + 558, + 341 + ], + "type": "text", + "content": "[30] Matthew Richardson, Ewa Dominowska, and Robert Ragno. 2007. Predicting Clicks: Estimating the Click-through Rate for New Ads. In Proc. 16th WWWW (WWW '07), ACM, 521-530." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 317, + 342, + 558, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 342, + 558, + 373 + ], + "spans": [ + { + "bbox": [ + 317, + 342, + 558, + 373 + ], + "type": "text", + "content": "[31] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. 2022. High-Resolution Image Synthesis With Latent Diffusion Models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). 
10684-10695." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 317, + 374, + 558, + 421 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 374, + 558, + 421 + ], + "spans": [ + { + "bbox": [ + 317, + 374, + 558, + 421 + ], + "type": "text", + "content": "[32] Andrey Savchenko, Anton Alekseev, Sejeong Kwon, Elena Tutubalina, Evgeny Myasnikov, and Sergey Nikolenko. 2020. Ad Lingua: Text Classification Improves Symbolism Prediction in Image Advertisements. In Proceedings of the 28th International Conference on Computational Linguistics. International Committee on Computational Linguistics, Barcelona, Spain (Online), 1886-1892. https://doi.org/10.18653/v1/2020.coling-main.171" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 317, + 422, + 558, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 422, + 558, + 460 + ], + "spans": [ + { + "bbox": [ + 317, + 422, + 558, + 460 + ], + "type": "text", + "content": "[33] Ilya Shenbin, Anton Alekseev, Elena Tutubalina, Valentin Malykh, and Sergey I. Nikolenko. 2020. RecVAE: A New Variational Autoencoder for Top-N Recommendations with Implicit Feedback. In Proceedings of the 13th International Conference on Web Search and Data Mining (Houston, TX, USA) (WSDM '20). ACM, New York, NY, USA, 528-536. https://doi.org/10.1145/3336191.3371831" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 317, + 461, + 558, + 484 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 461, + 558, + 484 + ], + "spans": [ + { + "bbox": [ + 317, + 461, + 558, + 484 + ], + "type": "text", + "content": "[34] Elena Tutubalina and Sergey I. Nikolenko. 2017. Demographic Prediction based on User Reviews about Medications. Computación y sistemas 21, 2 (2017), 227-241." 
+ } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 317, + 485, + 558, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 485, + 558, + 509 + ], + "spans": [ + { + "bbox": [ + 317, + 485, + 558, + 509 + ], + "type": "text", + "content": "[35] Elena Tutubalina and Sergey I. Nikolenko. 2018. Exploring convolutional neural networks and topic models for user profiling from drug reviews. Multimedia Tools and Applications 77, 4 (2018), 4791-4809." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 317, + 510, + 558, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 510, + 558, + 548 + ], + "spans": [ + { + "bbox": [ + 317, + 510, + 558, + 548 + ], + "type": "text", + "content": "[36] Wenjie Wang, Fuli Feng, Liqiang Nie, and Tat-Seng Chua. 2022. User-Controllable Recommendation Against Filter Bubbles. In Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval (Madrid, Spain) (SIGIR '22). ACM, New York, NY, USA, 1251-1261. https://doi.org/10.1145/3477495.3532075" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 317, + 549, + 558, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 549, + 558, + 589 + ], + "spans": [ + { + "bbox": [ + 317, + 549, + 558, + 589 + ], + "type": "text", + "content": "[37] Qi Yang, Aleksandr Farseev, and Andrey Filchenkov. 2021. Two-Faced Humans on Twitter and Facebook: Harvesting Social Multimedia for Human Personality Profiling. In Proceedings of the 2021 Workshop on Intelligent Cross-Data Analysis and Retrieval (Taipei, Taiwan) (ICDAR '21). ACM, New York, NY, USA, 39-47. 
https://doi.org/10.1145/3463944.3469270" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 317, + 590, + 558, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 590, + 558, + 620 + ], + "spans": [ + { + "bbox": [ + 317, + 590, + 558, + 620 + ], + "type": "text", + "content": "[38] Qi Yang, Aleksandr Farseev, Sergey Nikolenko, and Andrey Filchenkov. 2022. Do we behave differently on Twitter and Facebook: Multi-view social network user personality profiling for content recommendation. Frontiers in Big Data 5 (2022). https://doi.org/10.3389/fdata.2022.931206" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 317, + 621, + 558, + 660 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 621, + 558, + 660 + ], + "spans": [ + { + "bbox": [ + 317, + 621, + 558, + 660 + ], + "type": "text", + "content": "[39] Qi Yang, Sergey Nikolenko, Alfred Huang, and Aleksandr Farseev. 2022. Personality-Driven Social Multimedia Content Recommendation. In Proceedings of the 30th ACM International Conference on Multimedia (Lisboa, Portugal) (MM '22). ACM, New York, NY, USA, 7290-7299. https://doi.org/10.1145/3503161.3548769" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 317, + 661, + 558, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 661, + 558, + 700 + ], + "spans": [ + { + "bbox": [ + 317, + 661, + 558, + 700 + ], + "type": "text", + "content": "[40] Qi Yang, Christos Tzelepis, Sergey Nikolenko, Ioannis Patras, and Aleksandr Farseev. 2023. \"Just To See You Smile\": SMILEY, a Voice-Guided GUY GAN. In Proceedings of the Sixteenth ACM International Conference on Web Search and Data Mining (Singapore, Singapore) (WSDM '23). ACM, New York, NY, USA, 1196-1199. 
https://doi.org/10.1145/3539597.3573031" + } + ] + } + ], + "index": 44 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 320, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 320, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 320, + 69 + ], + "type": "text", + "content": "Against Opacity: Explainable AI and Large Language Models for Effective Digital Advertising" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 383, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 383, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 383, + 60, + 558, + 68 + ], + "type": "text", + "content": "MM '23, October 29-November 3, 2023, Ottawa, ON, Canada" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file